| code (string, 82 to 53.2k chars) | code_codestyle (int64, 0 to 721) | style_context (string, 91 to 41.9k chars) | style_context_codestyle (int64, 0 to 699) | label (int64, 0 to 1) |
|---|---|---|---|---|
"""simple docstring"""
import random
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list ,_lowerCamelCase : Any ) -> tuple:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = [], [], []
for element in data:
if element < pivot:
less.append(_lowerCamelCase )
elif element > pivot:
greater.append(_lowerCamelCase )
else:
equal.append(_lowerCamelCase )
return less, equal, greater
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list ,_lowerCamelCase : int ) -> Optional[int]:
# index = len(items) // 2 when trying to find the median
# (value of index when items is sorted)
# invalid input
if index >= len(_lowerCamelCase ) or index < 0:
return None
_lowerCAmelCase : List[str] = items[random.randint(0 ,len(_lowerCamelCase ) - 1 )]
_lowerCAmelCase : List[str] = 0
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = _partition(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = len(_lowerCamelCase )
_lowerCAmelCase : List[Any] = len(_lowerCamelCase )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(_lowerCamelCase ,_lowerCamelCase )
# must be in larger
else:
return quick_select(_lowerCamelCase ,index - (m + count) )
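A minimal usage sketch (the list and the expected results in the comments are illustrative, not part of the original):

# Illustrative usage of quick_select.
data = [7, 2, 9, 4, 1]
median = quick_select(data, len(data) // 2)  # 4: the middle element of sorted(data)
smallest = quick_select(data, 0)             # 1
out_of_range = quick_select(data, 10)        # None for an invalid index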
| code_codestyle: 213 |
"""simple docstring"""
import os
import sys
import unittest
_a : Optional[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_a : Dict = os.path.join(git_repo_path, 'src', 'transformers')
_a : str = '\n{0} = None\n'
_a : Tuple = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
_a : Dict = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : int = find_backend(""" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")""" )
self.assertIsNone(a__ )
_lowerCAmelCase : Union[str, Any] = find_backend(""" if not is_tokenizers_available():""" )
self.assertEqual(a__ , """tokenizers""" )
_lowerCAmelCase : Union[str, Any] = find_backend(""" if not is_tensorflow_text_available():""" )
self.assertEqual(a__ , """tensorflow_text""" )
_lowerCAmelCase : Dict = find_backend(""" if not (is_sentencepiece_available() and is_tokenizers_available()):""" )
self.assertEqual(a__ , """sentencepiece_and_tokenizers""" )
_lowerCAmelCase : Tuple = find_backend(
""" if not (is_sentencepiece_available() and is_tensorflow_text_available()):""" )
self.assertEqual(a__ , """sentencepiece_and_tensorflow_text""" )
_lowerCAmelCase : Optional[Any] = find_backend(
""" if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):""" )
self.assertEqual(a__ , """sentencepiece_and_tokenizers_and_vision""" )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("""torch""" , a__ )
self.assertIn("""tensorflow_text""" , a__ )
self.assertIn("""sentencepiece_and_tokenizers""" , a__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""BertModel""" , objects["""torch"""] )
self.assertIn("""TFBertModel""" , objects["""tf"""] )
self.assertIn("""FlaxBertModel""" , objects["""flax"""] )
self.assertIn("""BertModel""" , objects["""torch"""] )
self.assertIn("""TFBertTokenizer""" , objects["""tensorflow_text"""] )
self.assertIn("""convert_slow_tokenizer""" , objects["""sentencepiece_and_tokenizers"""] )
def __A ( self ):
_lowerCAmelCase : Dict = create_dummy_object("""CONSTANT""" , """'torch'""" )
self.assertEqual(a__ , """\nCONSTANT = None\n""" )
_lowerCAmelCase : List[Any] = create_dummy_object("""function""" , """'torch'""" )
self.assertEqual(
a__ , """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
_lowerCAmelCase : Optional[int] = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
"""
_lowerCAmelCase : Optional[int] = create_dummy_object("""FakeClass""" , """'torch'""" )
self.assertEqual(a__ , a__ )
def __A ( self ):
_lowerCAmelCase : List[str] = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
"""
_lowerCAmelCase : Dict = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""] , a__ )
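For intuition about what these tests exercise, here is a rough, hypothetical re-implementation of the line parsing; it is not the actual `check_dummies.find_backend`, just a sketch of the observable behavior asserted above:

import re

def find_backend_sketch(line):
    # Match lines like "    if not is_tokenizers_available():" or the multi-backend
    # form "    if not (is_a_available() and is_b_available()):".
    backends = re.findall(r"is_(\w+)_available\(\)", line)
    if not backends or not line.lstrip().startswith("if not"):
        return None
    return "_and_".join(backends)

print(find_backend_sketch(" if not (is_sentencepiece_available() and is_tokenizers_available()):"))
# -> "sentencepiece_and_tokenizers"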
| style_context_codestyle: 213 | label: 1 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
]
)
def rename_key(state_dict, old, new):
    """Rename a single key in the state dict (original name -> our name)."""
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    """Move timm backbone weights under the conv_encoder attribute of our model."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    """Split each fused in_proj matrix into separate query, key and value projections."""
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    """Resize the image so that its longer side matches the size the checkpoint was trained with."""
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image


def normalize(image):
    """Convert a PIL image to a tensor normalized with the standard ImageNet statistics."""
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Copy/paste/tweak the original Table Transformer weights into our model structure."""
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
type=str,
choices=[
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
],
help='URL of the Table Transformer checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
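To make the slicing in `read_in_q_k_v` concrete: PyTorch's `nn.MultiheadAttention` stores the query, key and value projections as one fused `(3 * d_model, d_model)` matrix, which the script splits back apart. A standalone sketch with random data (the dimension 256 matches the script; everything else is illustrative):

import torch

d_model = 256
in_proj_weight = torch.randn(3 * d_model, d_model)  # fused q/k/v projection, as stored by nn.MultiheadAttention
q_w = in_proj_weight[:d_model, :]              # rows 0..255   -> query projection
k_w = in_proj_weight[d_model : 2 * d_model]    # rows 256..511 -> key projection
v_w = in_proj_weight[-d_model:, :]             # rows 512..767 -> value projection
assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)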
| code_codestyle: 662 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Tokenize one example and record its characters-per-token ratio."""
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
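The `ratio_char_token` field is characters per token, a rough compression measure for the tokenizer. A self-contained illustration; the `gpt2` tokenizer here is only an example stand-in for `args.tokenizer_dir`:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")  # example tokenizer, not necessarily args.tokenizer_dir
content = "def add(a, b):\n    return a + b\n"
input_ids = tok(content, truncation=False)["input_ids"]
print(len(content) / len(input_ids))  # characters per token; higher means better compression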
| style_context_codestyle: 662 | label: 1 |
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Audio:
    """Audio feature: decodes an audio file into a dict with "path", "array" and "sampling_rate"."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, if you want to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM bytes, we don't have to read the file again (just use them!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(
        self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
    ) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
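Roughly how this feature is consumed through the public `datasets` API (a sketch; the file path is a placeholder and decoding only happens on access):

from datasets import Audio, Dataset

ds = Dataset.from_dict({"audio": ["path/to/clip.wav"]})    # placeholder path
ds = ds.cast_column("audio", Audio(sampling_rate=16_000))  # decode + resample lazily on access
sample = ds[0]["audio"]  # {"path": ..., "array": np.ndarray, "sampling_rate": 16000}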
| code_codestyle: 692 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}


class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
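A short instantiation sketch for the two modes this config supports (a sketch of expected behavior, based on the defaults above):

from transformers import DPTConfig

plain = DPTConfig()                 # plain ViT-style backbone, backbone_config stays None
hybrid = DPTConfig(is_hybrid=True)  # builds the default BiT backbone config shown above
print(type(hybrid.backbone_config).__name__)  # BitConfig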
| style_context_codestyle: 424 | label: 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
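For reference, a minimal sketch of instantiating one of these pipelines; the model IDs are illustrative examples, not mandated by this module:

import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

# Example model IDs (assumptions for illustration only).
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
)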
| code_codestyle: 487 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs
    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)
        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }
        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents
        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents
    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| style_context_codestyle: 487 | label: 1 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)

    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
__magic_name__ = parser.parse_args()
__magic_name__ , __magic_name__ = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
__magic_name__ = NllbMoeConfig.from_pretrained(
'''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
__magic_name__ = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('''Done''')
model.save_pretrained(args.pytorch_dump_folder_path)
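A quick check of the key mapping on two representative fairseq keys (run after the functions above are defined; expected output follows the rules in `rename_fairseq_keys`):

# Illustrative key names; values are dummies.
sample = {
    "decoder.layers.3.moe_layer.experts.0.fc1.weight": 0,
    "encoder.layers.1.moe_layer.gate.wg.weight": 0,
}
print(list(rename_fairseq_keys(sample, expert_idx=7).keys()))
# expected: ['decoder.layers.3.ffn.experts.expert_7.fc1.weight',
#            'encoder.layers.1.ffn.router.classifier.weight']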
| code_codestyle: 576 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
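A minimal instantiation sketch (the language codes are illustrative):

from transformers import XmodConfig

config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")
print(config.model_type, config.languages)  # xmod ['en_XX', 'de_DE']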
| style_context_codestyle: 576 | label: 1 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Create train and validation `DataLoader`s for GLUE MRPC, tokenized with bert-base-cased."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
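# A minimal sketch of the tracking API used above, separated out from the full
# GLUE example (project name and logged values are illustrative):
#
#   from accelerate import Accelerator
#
#   accelerator = Accelerator(log_with="all", project_dir="logs")
#   accelerator.init_trackers("my_project", config={"lr": 2e-5})
#   for epoch in range(3):
#       accelerator.log({"train_loss": 0.1}, step=epoch)
#   accelerator.end_training()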
| 68
|
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    def test_set_level( self ):
        logger = logging.get_logger()
        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()
        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        # restore to the original level
        logging.set_verbosity(level_origin )
    def test_integration( self ):
        level_origin = logging.get_verbosity()
        logger = logging.get_logger("transformers.models.bart.tokenization_bart" )
        msg = "Testing 1, 2, 3"
        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger ) as cl:
                logger.warning(msg )
            self.assertEqual(cl.out , msg + "\n" )
        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()
        # should not be able to log warnings
        with CaptureLogger(logger ) as cl:
            logger.warning(msg )
        self.assertEqual(cl.out , "" )
        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger ) as cl:
            logger.warning(msg )
        self.assertEqual(cl.out , msg + "\n" )
        # restore to the original level
        logging.set_verbosity(level_origin )
@mockenv(TRANSFORMERS_VERBOSITY="error" )
    def test_env_override( self ):
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart" )
        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY" , None )
        env_level = logging.log_levels[env_level_str]
        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level , current_level , F"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}" , )
        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="super-error" )
    def test_env_invalid_override( self ):
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger ) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart" )
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error" , cl.out )
# no need to restore as nothing was changed
    def test_advisory_warnings( self ):
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.get_logger("transformers.models.bart.tokenization_bart" )
        msg = "Testing 1, 2, 3"
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1" ):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger ) as cl:
                logger.warning_advice(msg )
            self.assertEqual(cl.out , "" )
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="" ):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger ) as cl:
                logger.warning_advice(msg )
            self.assertEqual(cl.out , msg + "\n" )
def test_set_progress_bar_enabled():
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
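# A minimal sketch of the verbosity API exercised by the tests above:
#
#   from transformers.utils import logging
#
#   logging.set_verbosity_error()   # silences warnings from every transformers.* logger
#   logger = logging.get_logger("transformers.models.bart.tokenization_bart")
#   logger.warning("suppressed")    # not emitted at ERROR verbosity
#   logging.set_verbosity_warning()
#   logger.warning("emitted")       # visible again once verbosity is restored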
| 68
| 1
|
'''simple docstring'''
def solution( length : int = 50 ):
    """simple docstring"""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f'''{solution() = }''')
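    # Sanity check from the worked example in the Project Euler 116 statement:
    # a row of length 5 admits exactly 7 red (length-2), 3 green (length-3)
    # and 2 blue (length-4) tilings, so solution(5) must return 12.
    assert solution(5) == 12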
| 44
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def set_recursively( hf_pointer , key , value , full_name , weight_type ) -> None:
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        F""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def recursively_load_weights( fairseq_model , hf_model , is_finetuned ) -> None:
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
                if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F"""Unused weights: {unused_weights}""" )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ) -> None:
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_hubert_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path )
    else:
        config = HubertConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , '''vocab.json''' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            with open(vocab_path , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(target_dict.indices , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wav2vec = HubertForCTC(config )
    else:
        hf_wav2vec = HubertModel(config )
    if is_finetuned:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    model = model[0].eval()
    recursively_load_weights(model , hf_wav2vec , is_finetuned )
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
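# A hypothetical invocation of the converter above (script name and every path
# are placeholders):
#
#   python convert_hubert_checkpoint.py \
#       --checkpoint_path /path/to/hubert_fairseq.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path /path/to/hf_hubert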
| 219
| 0
|
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = """
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
SCREAMING_SNAKE_CASE_ = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
SCREAMING_SNAKE_CASE_ = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 370
|
"""simple docstring"""
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))
def _validate_point(point: list) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        F"""{type(item).__name__}"""
                    )
                    raise TypeError(msg)
        else:
            msg = F"""Expected a list of numbers as input, found {type(point).__name__}"""
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")
def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
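# Worked examples for the functions above:
#
#   >>> manhattan_distance([1, 1], [2, 2])
#   2.0
#   >>> manhattan_distance_one_liner([1, 3, 9, 3], [10, 11, 4, 0])
#   25.0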
| 370
| 1
|
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively( hf_pointer ,key ,value ,full_name ,weight_type ) -> None:
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer ,attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer ,weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights_wavaveca( fairseq_model ,hf_model ) -> None:
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name ,value ,feature_extractor ,unused_weights ,hf_model.config.feat_extract_norm == "group" ,)
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."] ):
            load_adapter(name ,value ,adapter ,unused_weights )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" ,layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model ,mapped_key ,value ,name ,weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"Unused weights: {unused_weights}" )
def load_conv_layer( full_name ,value ,feature_extractor ,unused_weights ,use_group_norm ) -> None:
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name )
def load_adapter( full_name ,value ,adapter ,unused_weights ) -> None:
    name = full_name.split("adaptor." )[-1]
    items = name.split("." )
    if items[1].isdigit():
        layer_id = int(items[1] )
    else:
        layer_id = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}." )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}." )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}." )
    elif isinstance(layer_id ,int ):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}." )
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}." )
    else:
        unused_weights.append(full_name )
def make_linear_from_emb( emb ) -> nn.Linear:
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size ,emb_size ,bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path ,pytorch_dump_folder_path ,dict_path ,config_yaml_path ,encoder_config_path ,decoder_config_path ,add_adapter ,adapter_kernel_size ,adapter_stride ,decoder_start_token_id ,encoder_output_dim ,):
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path ,add_adapter=add_adapter ,adapter_stride=adapter_stride ,adapter_kernel_size=adapter_kernel_size ,use_auth_token=True ,output_hidden_size=encoder_output_dim ,)
    decoder_config = MBartConfig.from_pretrained(decoder_config_path )
    # load model
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] ,arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/" )[:-1] ),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        } ,)
    model = model[0].eval()
    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path ,use_auth_token=True )
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    recursively_load_weights_wavaveca(model.encoder ,hf_encoder )
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config )
    missing_keys , unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() ,strict=False )
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}" )
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder ,decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False
    tokenizer = MBartaaTokenizer(dict_path )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=25_0004, type=int, help="""`decoder_start_token_id` of model config""")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
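# A hypothetical invocation of the wav2vec2 + mBART-50 converter above (script
# name and paths are placeholders; the encoder/decoder config ids keep their
# argparse defaults):
#
#   python convert_wav2vec2_mbart_checkpoint.py \
#       --checkpoint_path /path/to/wav2vec2_mbart_fairseq.pt \
#       --config_yaml_path /path/to/config.yaml \
#       --dict_path /path/to/dict.txt \
#       --pytorch_dump_folder_path /path/to/hf_model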
| 137
|
def logical_left_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number
def logical_right_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number
def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )
    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
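# Worked examples for the three shifts above:
#
#   >>> logical_left_shift(5, 2)       # 0b101 with two zeros appended
#   '0b10100'
#   >>> logical_right_shift(20, 2)     # 0b10100 with the two low bits dropped
#   '0b101'
#   >>> arithmetic_right_shift(-4, 1)  # two's complement 1100, sign-extended
#   '0b1110'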
| 137
| 1
|
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint( checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ):
    # Load configuration defined in the metadata file
    with open(metadata_path ) as metadata_file:
        metadata = json.load(metadata_file )
    config = LukeConfig(use_entity_aware_attention=True , **metadata["""model_config"""] )
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path , map_location="""cpu""" )
    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path )
    tokenizer = RobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("""<ent>""" , lstrip=False , rstrip=False )
    entity_token_2 = AddedToken("""<ent2>""" , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_1, entity_token_2]} )
    config.vocab_size += 2
    print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    with open(os.path.join(pytorch_dump_folder_path , LukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
        json.dump(entity_vocab , f )
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    # Initialize the embeddings of the special tokens
    word_emb = state_dict["""embeddings.word_embeddings.weight"""]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["""@"""] )[0]].unsqueeze(0 )
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["""#"""] )[0]].unsqueeze(0 )
    state_dict["""embeddings.word_embeddings.weight"""] = torch.cat([word_emb, ent_emb, ent2_emb] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"""encoder.layer.{layer_index}.attention.self."""
            state_dict[prefix + """w2e_""" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + """e2w_""" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + """e2e_""" + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["""entity_embeddings.entity_embeddings.weight"""]
    entity_emb[entity_vocab["""[MASK2]"""]] = entity_emb[entity_vocab["""[MASK]"""]]
    model = LukeModel(config=config ).eval()
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    if not (len(missing_keys ) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"""Missing keys {', '.join(missing_keys )}. Expected only missing embeddings.position_ids""" )
    if not (all(key.startswith("""entity_predictions""" ) or key.startswith("""lm_head""" ) for key in unexpected_keys )):
        raise ValueError(
            """Unexpected keys"""
            f""" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}""" )
    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path , task="""entity_classification""" )
    text = (
        '''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
        ''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
    )
    span = (39, 42)
    encoding = tokenizer(text , entity_spans=[span] , add_prefix_space=True , return_tensors="""pt""" )
    outputs = model(**encoding )
    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024) )
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
    else:  # base
        expected_shape = torch.Size((1, 42, 768) )
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024) )
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]] )
    else:  # base
        expected_shape = torch.Size((1, 1, 768) )
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
            f""" {expected_shape}""" )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ):
        raise ValueError
    # Finally, save our PyTorch model and tokenizer
    print("""Saving PyTorch model to {}""".format(pytorch_dump_folder_path ) )
    model.save_pretrained(pytorch_dump_folder_path )
def load_entity_vocab( entity_vocab_path ):
    entity_vocab = {}
    with open(entity_vocab_path , """r""" , encoding="""utf-8""" ) as f:
        for index, line in enumerate(f ):
            title , _ = line.rstrip().split("""\t""" )
            entity_vocab[title] = index
    return entity_vocab
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
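# An illustration of the entity vocabulary format consumed by load_entity_vocab
# above: one "<title>\t<count>" pair per line, mapped to consecutive ids in
# file order (titles and counts here are made up):
#
#   [PAD]\t0
#   [UNK]\t0
#   [MASK]\t0
#   Ana Ivanovic\t123
#
# load_entity_vocab would return {"[PAD]": 0, "[UNK]": 1, "[MASK]": 2, "Ana Ivanovic": 3}.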
| 705
|
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed( TransformedDistribution ):
    def __init__( self , base_distribution: Distribution , loc=None , scale=None , event_dim=0 ):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=event_dim )] )
    @property
    def mean( self ):
        return self.base_dist.mean * self.scale + self.loc
    @property
    def variance( self ):
        return self.base_dist.variance * self.scale**2
    @property
    def stddev( self ):
        return self.variance.sqrt()
class ParameterProjection( nn.Module ):
    def __init__( self , in_features: int , args_dim: Dict[str, int] , domain_map: Callable[..., Tuple[torch.Tensor]] , **kwargs ):
        super().__init__(**kwargs )
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features , dim ) for dim in args_dim.values()] )
        self.domain_map = domain_map
    def forward( self , x: torch.Tensor ) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x ) for proj in self.proj]
        return self.domain_map(*params_unbounded )
class LambdaLayer( nn.Module ):
    def __init__( self , function ):
        super().__init__()
        self.function = function
    def forward( self , x , *args ):
        return self.function(x , *args )
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]
    def __init__( self , dim: int = 1 ):
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}
    def _base_distribution( self , distr_args ):
        if self.dim == 1:
            return self.distribution_class(*distr_args )
        else:
            return Independent(self.distribution_class(*distr_args ) , 1 )
    def distribution( self , distr_args , loc: Optional[torch.Tensor] = None , scale: Optional[torch.Tensor] = None , ) -> Distribution:
        distr = self._base_distribution(distr_args )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr , loc=loc , scale=scale , event_dim=self.event_dim )
    @property
    def event_shape( self ) -> Tuple:
        return () if self.dim == 1 else (self.dim,)
    @property
    def event_dim( self ) -> int:
        return len(self.event_shape )
    @property
    def value_in_support( self ) -> float:
        return 0.0
    def get_parameter_projection( self , in_features: int ) -> nn.Module:
        return ParameterProjection(
            in_features=in_features , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
    def domain_map( self , *args: torch.Tensor ):
        raise NotImplementedError()
    @staticmethod
    def squareplus( x: torch.Tensor ) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x ) + 4.0 )) / 2.0
class StudentTOutput( DistributionOutput ):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT
    @classmethod
    def domain_map( cls , df: torch.Tensor , loc: torch.Tensor , scale: torch.Tensor ):
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        df = 2.0 + cls.squareplus(df )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class NormalOutput( DistributionOutput ):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal
    @classmethod
    def domain_map( cls , loc: torch.Tensor , scale: torch.Tensor ):
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )
class NegativeBinomialOutput( DistributionOutput ):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial
    @classmethod
    def domain_map( cls , total_count: torch.Tensor , logits: torch.Tensor ):
        total_count = cls.squareplus(total_count )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )
    def _base_distribution( self , distr_args ):
        total_count , logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count , logits=logits )
        else:
            return Independent(self.distribution_class(total_count=total_count , logits=logits ) , 1 )
    def distribution( self , distr_args , loc: Optional[torch.Tensor] = None , scale: Optional[torch.Tensor] = None ) -> Distribution:
        total_count , logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
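# A minimal usage sketch of the abstractions above (sizes are illustrative):
#
#   output = StudentTOutput(dim=1)
#   projection = output.get_parameter_projection(in_features=32)
#   features = torch.randn(8, 32)       # e.g. decoder hidden states
#   distr_args = projection(features)   # (df, loc, scale), each of shape (8,)
#   distribution = output.distribution(distr_args)
#   sample = distribution.sample()      # shape (8,)
#   log_prob = distribution.log_prob(sample)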
| 258
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    """simple docstring"""
    def __init__( self , data: Any ):
        self.data = data
        self.next = None
class CircularLinkedList:
    """simple docstring"""
    def __init__( self ):
        self.head = None
        self.tail = None
    def __iter__( self ) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break
    def __len__( self ) -> int:
        return sum(1 for _ in self )
    def __repr__( self ) -> str:
        return "->".join(str(item ) for item in iter(self ) )
    def insert_tail( self , data: Any ) -> None:
        self.insert_nth(len(self ) , data )
    def insert_head( self , data: Any ) -> None:
        self.insert_nth(0 , data )
    def insert_nth( self , index: int , data: Any ) -> None:
        if index < 0 or index > len(self ):
            raise IndexError("""list index out of range.""" )
        new_node = Node(data )
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self ) - 1:  # insert at tail
                self.tail = new_node
    def delete_front( self ) -> Any:
        return self.delete_nth(0 )
    def delete_tail( self ) -> Any:
        return self.delete_nth(len(self ) - 1 )
    def delete_nth( self , index: int = 0 ) -> Any:
        if not 0 <= index < len(self ):
            raise IndexError("""list index out of range.""" )
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.head.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self ) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data
    def is_empty( self ) -> bool:
        return len(self ) == 0
def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list ) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list ) == ""
    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_nth(-1 )
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0 )
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True
    for i in range(5 ):
        assert len(circular_linked_list ) == i
        circular_linked_list.insert_nth(i , i + 1 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    circular_linked_list.insert_tail(6 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 7 ) )
    circular_linked_list.insert_head(0 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(0 , 7 ) )
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    assert circular_linked_list.delete_nth(2 ) == 3
    circular_linked_list.insert_nth(2 , 3 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
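# A quick usage sketch of the list above:
#
#   >>> cll = CircularLinkedList()
#   >>> for value in (1, 2, 3):
#   ...     cll.insert_tail(value)
#   >>> repr(cll)
#   '1->2->3'
#   >>> cll.delete_front()
#   1
#   >>> len(cll)
#   2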
| 451
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def remove_ignore_keys_( state_dict ) -> None:
    ignore_keys = [
        """encoder.version""",
        """decoder.version""",
        """model.encoder.version""",
        """model.decoder.version""",
        """decoder.output_projection.weight""",
        """_float_tensor""",
        """encoder.embed_positions._float_tensor""",
        """decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_keys( s_dict ) -> None:
    keys = list(s_dict.keys() )
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("""transformer_layers""" , """layers""" )] = s_dict.pop(key )
        elif "subsample" in key:
            s_dict[key.replace("""subsample""" , """conv""" )] = s_dict.pop(key )
def make_linear_from_emb( emb ) -> nn.Linear:
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_sat_checkpoint_to_tfms( checkpoint_path , pytorch_dump_folder_path ) -> None:
    mam_aaa = torch.load(checkpoint_path , map_location="""cpu""" )
    args = mam_aaa["""args"""]
    state_dict = mam_aaa["""model"""]
    lm_head_weights = state_dict["""decoder.output_projection.weight"""]
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    vocab_size = state_dict["""decoder.embed_tokens.weight"""].shape[0]
    tie_embeds = args.share_decoder_input_output_embed
    conv_kernel_sizes = [int(i ) for i in args.conv_kernel_sizes.split(""",""" )]
    config = SpeechaTextConfig(
        vocab_size=vocab_size , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , num_conv_layers=len(conv_kernel_sizes ) , conv_channels=args.conv_channels , conv_kernel_sizes=conv_kernel_sizes , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=tie_embeds , num_beams=5 , max_length=200 , use_cache=True , decoder_start_token_id=2 , early_stopping=True , )
    model = SpeechaTextForConditionalGeneration(config )
    missing , unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            """Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"""
            f""" but all the following weights are missing {missing}""" )
    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.lm_head.weight.data = lm_head_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--fairseq_path""", type=str, help="""Path to the fairseq model (.pt) file.""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
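# A hypothetical invocation of the Speech2Text converter above (script name
# and paths are placeholders):
#
#   python convert_s2t_fairseq_to_tfms.py \
#       --fairseq_path /path/to/s2t_checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/hf_s2t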
| 451
| 1
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess( image ):
    w , h = image.size
    w , h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION['''lanczos'''])
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline( DiffusionPipeline ):
    '''simple docstring'''
    def __init__( self , vqvae , unet , scheduler ):
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , image=None , batch_size=1 , num_inference_steps=100 , eta=0.0 , generator=None , output_type="pil" , return_dict=True , ):
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        else:
            raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image )}' )
        if isinstance(image , PIL.Image.Image ):
            image = preprocess(image )
        height , width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters() ).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        image = image.to(device=self.device , dtype=latents_dtype )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['''eta'''] = eta
        for t in self.progress_bar(timesteps_tensor ):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1 )
            latents_input = self.scheduler.scale_model_input(latents_input , t )
            # predict the noise residual
            noise_pred = self.unet(latents_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents ).sample
        image = torch.clamp(image , -1.0 , 1.0 )
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
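# A minimal usage sketch of the pipeline above (the checkpoint id and file
# names are illustrative assumptions):
#
#   import PIL.Image
#
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   low_res = PIL.Image.open("low_res.png").convert("RGB")
#   upscaled = pipe(image=low_res, num_inference_steps=100, eta=1.0).images[0]
#   upscaled.save("upscaled.png")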
| 350
|
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def layer_name_mapping( key, file):
    layer_rename_map = {
        '''word_embeddings.weight''': '''word_embeddings.weight''',
        '''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
        '''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
        '''weight''': '''ln_f.weight''',
        '''bias''': '''ln_f.bias''',
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks
    layer_number = int(re.match(R'''.*layer_(\d*).*''', file)[1])
    layer_number -= 3
    return f'h.{layer_number}.' + key
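# For example, assuming Megatron-DeepSpeed shard names of the form
# "layer_04-model_00-model_states.pt":
#
#   >>> layer_name_mapping("input_layernorm.weight", "layer_04-model_00-model_states.pt")
#   'h.1.input_layernorm.weight'
#
# i.e. layer 4 minus the 3 leading non-transformer modules.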
def get_dtype_size( dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(R'''[^\d](\d+)$''', str(dtype))
    if bit_search is None:
        raise ValueError(f'`dtype` is not a valid dtype: {dtype}.')
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch( bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)
    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith('''layer''') and "model_00" in s, file_names))
        index_dict = {'''weight_map''': {}, '''metadata''': {}}
        total_size = 0
        missing_keys = None
        config = BloomConfig()
        for j, file in enumerate(file_names):
            print('''Processing file: {}'''.format(file))
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                tp_file = file.replace('''model_00''', f'model_0{i}')
                temp = torch.load(os.path.join(bloom_checkpoint_path, tp_file), map_location='''cpu''')
                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)
                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors, os.path.join(
                    pytorch_dump_folder_path, '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)), ), )
            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = '''pytorch_model_{}-of-{}.bin'''.format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5))
        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, '''w''', encoding='''utf-8''') as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + '''.index.json'''), '''w''', encoding='''utf-8''') as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + '''\n'''
            f.write(json_config)
else:
UpperCamelCase = BloomModel(_UpperCAmelCase)
UpperCamelCase = os.listdir(_UpperCAmelCase)
UpperCamelCase = sorted(filter(lambda _UpperCAmelCase: s.startswith('''layer''') and "model_00" in s, _UpperCAmelCase))
UpperCamelCase = None
for i, file in enumerate(_UpperCAmelCase):
UpperCamelCase = None
for i in range(_UpperCAmelCase):
# load all TP files
UpperCamelCase = file.replace('''model_00''', f'model_0{i}')
UpperCamelCase = torch.load(os.path.join(_UpperCAmelCase, _UpperCAmelCase), map_location='''cpu''')
# Rename keys in the transformers names
UpperCamelCase = list(temp.keys())
for key in keys:
UpperCamelCase = temp.pop(_UpperCAmelCase)
if tensors is None:
UpperCamelCase = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(_UpperCAmelCase) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCamelCase = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
# We concatenate these weights accross TP ranks
UpperCamelCase = torch.cat([tensors[key], temp[key]], dim=_UpperCAmelCase)
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_UpperCAmelCase) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
UpperCamelCase = tensors[key] / pretraining_tp
UpperCamelCase = model.load_state_dict(_UpperCAmelCase, strict=_UpperCAmelCase)
assert not other_keys.unexpected_keys, f'The keys {other_keys.unexpected_keys} are unexpected'
if missing_keys is None:
UpperCamelCase = set(other_keys.missing_keys)
else:
UpperCamelCase = missing_keys.intersection(set(other_keys.missing_keys))
assert not missing_keys, f'The keys {missing_keys} are missing'
# Save pytorch-model
os.makedirs(_UpperCAmelCase, exist_ok=_UpperCAmelCase)
UpperCamelCase = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCamelCase = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(f'Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}')
if config.torch_dtype is not None:
UpperCamelCase = model.to(config.torch_dtype)
torch.save(model.state_dict(), _UpperCAmelCase)
print(f'Save configuration file to {pytorch_config_dump_path}')
with open(_UpperCAmelCase, '''w''', encoding='''utf-8''') as f:
f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
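# Example invocation (illustrative; the script name and checkpoint path are hypothetical):
#   python convert_bloom_original_checkpoint_to_pytorch.py \
#       --bloom_checkpoint_path /path/to/megatron_deepspeed_ckpt \
#       --pytorch_dump_folder_path ./bloom-converted \
#       --shard_model \
#       --pretraining_tp 4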
| 350
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/electra-small-generator''': 5_12,
'''google/electra-base-generator''': 5_12,
'''google/electra-large-generator''': 5_12,
'''google/electra-small-discriminator''': 5_12,
'''google/electra-base-discriminator''': 5_12,
'''google/electra-large-discriminator''': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # make sure the backend normalizer matches the requested options
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
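# Illustrative token-type layout (token ids are hypothetical): for a sequence pair
# with A holding 3 tokens and B holding 2, create_token_type_ids_from_sequences
# returns [0, 0, 0, 0, 0, 1, 1, 1]: len([CLS] + A + [SEP]) zeros, then
# len(B + [SEP]) ones.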
| 157
|
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    """Rescale data to the range [0, 1] (min-max normalization)."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Rescale data to zero mean and unit standard deviation (z-score)."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
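# Example usage (illustrative):
#   normalization([2, 4, 6])    # -> [0.0, 0.5, 1.0]
#   standardization([2, 4, 6])  # -> [-1.0, 0.0, 1.0]  (sample stdev is 2)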
| 157
| 1
|
from math import sqrt
def solution(limit: int = 1_000_000) -> int:
    """Find the least M such that more than `limit` cuboids with sides up to
    M x M x M have an integer shortest surface path (Project Euler 86)."""
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
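# Counting sketch: for a cuboid a x b x c with a <= b <= c = max_cuboid_size, the
# shortest surface path has length sqrt((a + b)**2 + c**2). For each integer
# value of a + b that yields a perfect square, the number of valid (a, b) splits
# is min(c, (a + b) // 2) - max(1, a + b - c) + 1, which is what the loop adds.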
if __name__ == "__main__":
print(F'''{solution() = }''')
| 530
|
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21_841
    else:
        num_classes = 1_000

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
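# Name parsing example (illustrative): "swin_base_patch4_window7_224" splits into
# ["swin", "base", "patch4", "window7", "224"], so model_size == "base",
# window_size == 7 (last char of "window7") and img_size == 224.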
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
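# Layout note (illustrative): timm fuses query/key/value into a single "qkv"
# tensor whose first dimension is 3 * all_head_size, so the slices val[:dim],
# val[dim : dim * 2] and val[-dim:] recover the query, key and value parts.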
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 530
| 1
|
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
\'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
\'beta\' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    """Wrapper around sacrebleu's chrF/chrF++ implementation."""

    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ,id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' ,id='''sequence''' ) ,id='''references''' ),
} ) ,codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] ,reference_urls=[
'''https://github.com/m-popovic/chrF''',
] ,)
    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        # sacrebleu expects one list per reference "slot", so transpose the references
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 41
|
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(
            prompt, init_image, mask_image
        )

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 43
| 0
|
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250_112,
        d_model=512,
        d_kv=64,
        d_ff=1_024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
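# Activation parsing example (illustrative): feed_forward_proj="gated-gelu" sets
# is_gated_act=True and dense_act_fn="gelu_new" (special-cased above), while a
# plain "relu" sets is_gated_act=False and dense_act_fn="relu".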
class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
| 572
|
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49_408,
        hidden_size=512,
        intermediate_size=2_048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49_406,
        eos_token_id=49_407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3_072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class OwlViTConfig(PretrainedConfig):
    model_type = "owlvit"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
| 572
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
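# Minimal usage sketch (the PIL image here is hypothetical):
#   processor = CLIPImageProcessor()
#   batch = processor(images=pil_image, return_tensors="pt")
#   batch["pixel_values"].shape  # -> (1, 3, 224, 224) with the defaults above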
| 642
|
"""simple docstring"""
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a * x + b * y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)
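# Worked example (illustrative): extended_euclid(10, 6) returns (-1, 2), and
# indeed 10 * (-1) + 6 * 2 == 2 == gcd(10, 6).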
def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Return n in [0, n1 * n2) with n % n1 == r1 and n % n2 == r2 (n1, n2 coprime)."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
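# Worked example (illustrative): chinese_remainder_theorem(5, 1, 7, 3) returns 31,
# and 31 % 5 == 1 while 31 % 7 == 3.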
def invert_modulo(a: int, n: int) -> int:
    """Return b such that (a * b) % n == 1."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b
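# Worked example (illustrative): invert_modulo(2, 5) returns 3, since
# 2 * 3 == 6 == 1 (mod 5).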
def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same result as chinese_remainder_theorem, computed via modular inverses."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
| 642
| 1
|
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges; adding the weight is optional and repetition is handled
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])
    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes
    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)
    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False
    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class Graph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges; adding the weight is optional and repetition is handled
    def add_pair(self, u, v, w=1):
        # add the u -> v edge
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)
    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])
    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)
    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False
    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
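# Minimal usage sketch (a small hand-built graph; values shown are what the
# traversals above produce for it):
#   g = DirectedGraph()
#   g.add_pair(1, 2)
#   g.add_pair(2, 3)
#   g.dfs()  # -> [1, 2, 3]
#   g.bfs()  # -> [1, 2, 3]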
| 710
|
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 621
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    r"""Configuration class for NAT (Neighborhood Attention Transformer) models."""

    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-5, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
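
# Hedged usage sketch (added for illustration): instantiating the restored
# config class directly and checking the derived attributes.
#
#     config = NatConfig(embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16])
#     print(config.num_layers)    # 4
#     print(config.hidden_size)   # 64 * 2 ** 3 == 512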
| 31
|
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
_snake_case : str = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
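
# Hedged migration sketch (added for illustration): the shim above keeps old
# imports working while pointing users at the replacement class.
#
#     processor = CLIPImageProcessor()   # preferred, no warning
#     legacy = CLIPFeatureExtractor()    # still works, but emits a FutureWarning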
| 693
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
__magic_name__ = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
__magic_name__ = {
'''RUCAIBox/mvp''': 1_0_2_4,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    r"""Fast (tokenizers-backed) MVP tokenizer, derived from the GPT-2 byte-level BPE tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask tokens behave like normal words, i.e. include the space before them
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
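
# Hedged usage sketch (added for illustration): the checkpoint name comes from
# the PRETRAINED_VOCAB_FILES_MAP above; network access is needed on first use.
#
#     tok = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
#     enc = tok("Hello world", return_tensors="pt")
#     print(enc["input_ids"])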
| 713
|
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    r"""Wraps an EnCodec feature extractor and a T5 tokenizer into a single processor."""

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None):
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
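
# Hedged usage sketch (added for illustration): the variable names below
# (`feature_extractor`, `tokenizer`, `raw_waveform`, `generated_values`) are
# placeholders constructed elsewhere.
#
#     processor = MusicgenProcessor(feature_extractor, tokenizer)
#     batch = processor(text=["80s pop"], audio=raw_waveform, sampling_rate=32000)
#     decoded = processor.batch_decode(audio=generated_values, padding_mask=batch["padding_mask"])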
| 73
| 0
|
'''simple docstring'''
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
lowerCAmelCase : Dict = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class TranslationTool(PipelineTool):
    """Agent tool that translates text between any two NLLB-200 languages."""

    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
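
# Hedged usage sketch (added for illustration): it assumes PipelineTool's
# `__call__` forwards positional/keyword arguments to `encode` above and lazily
# downloads the checkpoint on first use.
#
#     translator = TranslationTool()
#     print(translator("Bonjour", src_lang="French", tgt_lang="English"))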
| 444
|
"""A platform-independent file lock that supports the with-statement."""
import logging
import os
import threading
import time


try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None
def logger():
    """Returns the logger instance used in this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        #   https://github.com/benediktschmitt/py-filelock/issues/31
        #   https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
| 267
| 0
|
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range)

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True

        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
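
# Hedged usage sketch (added for illustration): the integration checks above
# can be reproduced directly with the same checkpoint they use.
#
#     model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     logits = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
#     print(logits.shape)  # (1, 6, 33)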
| 718
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
a_ : List[str] = R'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(a_)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(self, vocab_size=None, is_encoder_decoder=True, prefix=None, bos_token_id=None, pad_token_id=None, eos_token_id=None, decoder_start_token_id=None, title_sep=" / ", doc_sep=" // ", n_docs=5, max_combined_length=300, retrieval_vector_size=768, retrieval_batch_size=8, dataset="wiki_dpr", dataset_split="train", index_name="compressed", index_path=None, passages_path=None, use_dummy_dataset=False, reduce_loss=False, label_smoothing=0.0, do_deduplication=True, exclude_bos_score=False, do_marginalize=False, output_retrieved=False, use_cache=True, forced_eos_token_id=None, **kwargs):
        super().__init__(bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs)
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs) -> PretrainedConfig:
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
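
# Hedged usage sketch (added for illustration): composing a RAG config from two
# sub-configs via the classmethod above; the checkpoint names are examples.
#
#     from transformers import AutoConfig
#     qe = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#     gen = AutoConfig.from_pretrained("facebook/bart-large")
#     rag = RagConfig.from_question_encoder_generator_configs(qe, gen)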
| 444
| 0
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case__ : Optional[Any] = logging.get_logger(__name__)
snake_case__ : List[str] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
snake_case__ : str = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
snake_case__ : Optional[int] = {
"""gpt2""": 1_0_2_4,
"""gpt2-medium""": 1_0_2_4,
"""gpt2-large""": 1_0_2_4,
"""gpt2-xl""": 1_0_2_4,
"""distilgpt2""": 1_0_2_4,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    r"""Fast (tokenizers-backed) GPT-2 byte-level BPE tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs) -> None:
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs)

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
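
# Hedged usage sketch (added for illustration): the checkpoint names come from
# the PRETRAINED_VOCAB_FILES_MAP above.
#
#     tok = GPT2TokenizerFast.from_pretrained("gpt2")
#     print(tok("Hello world")["input_ids"])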
| 23
|
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph: dict) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
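
# Hedged extra example (added for illustration): a triangle (odd cycle) is the
# smallest non-bipartite graph, so the check returns False for it.
#
#     triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
#     print(check_bipartite_dfs(triangle))  # False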
| 523
| 0
|
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """Equivalent resistance of resistors in parallel: Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Equivalent resistance of resistors in series: Req = R1 + R2 + ... + Rn."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
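
# Hedged usage sketch (added for illustration):
#
#     print(resistor_series([2.0, 3.0, 5.0]))  # 10.0
#     print(resistor_parallel([2.0, 2.0]))     # 1.0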
| 713
|
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    r"""Configuration class for Data2Vec audio models."""

    model_type = "data2vec-audio"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embedding_groups=16, conv_pos_kernel_size=19, num_conv_pos_embeddings=5, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="sum", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
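
# Hedged usage sketch (added for illustration):
#
#     config = Data2VecAudioConfig(vocab_size=32, hidden_size=768)
#     print(config.num_feat_extract_layers)  # 7 conv feature-extractor layers
#     print(config.inputs_to_logits_ratio)   # product of conv strides: 320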
| 157
| 0
|
import random
def _partition(data: list, pivot) -> tuple:
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
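
# Hedged usage sketch (added for illustration): quick_select returns the value
# that would sit at `index` in the sorted list, in expected O(n) time.
#
#     data = [7, 1, 5, 3, 9]
#     print(quick_select(data, len(data) // 2))  # 5, the median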
| 568
|
def solution():
    """Return the last ten digits of 1**1 + 2**2 + ... + 1000**1000 (Project Euler 48)."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
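
# Hedged alternative (added for illustration): the same answer without big-int
# blowup, using modular exponentiation since only the last ten digits matter.
#
#     MOD = 10**10
#     print(str(sum(pow(i, i, MOD) for i in range(1, 1001)) % MOD).zfill(10))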
| 568
| 1
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the command line options for the launch helper."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
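
# Hedged usage sketch (added for illustration): this launcher is invoked from
# the command line; the training script name and its flags are placeholders.
#
#     python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...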
| 341
|
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    """Import and return the module defined in `test_file`."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Return all model tester classes found in `test_file`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Return all test classes in `test_file` with a non-empty `all_model_classes`."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        test_class = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(test_class)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Return all model classes exercised by the test classes in `test_file`."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Return the model tester class attached to a test class, if any."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Return all test classes in `test_file` that test `model_class`."""
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Return all tester classes in `test_file` used to test `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Map each test class in `test_file` to its model tester class."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_test_mapping(test_file):
    """Map each model class in `test_file` to the test classes exercising it."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Map each model class in `test_file` to the tester classes exercising it."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping
def __UpperCamelCase ( snake_case ) -> Tuple:
'''simple docstring'''
if isinstance(snake_case , snake_case ):
return o
elif isinstance(snake_case , snake_case ):
return o.__name__
elif isinstance(snake_case , (list, tuple) ):
return [to_json(snake_case ) for x in o]
elif isinstance(snake_case , snake_case ):
return {to_json(snake_case ): to_json(snake_case ) for k, v in o.items()}
else:
return o
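

# Minimal usage sketch (illustrative; `tests/models/bert/test_modeling_bert.py` is just
# an example path that must exist in a repository checkout for this to run):
if __name__ == "__main__":
    bert_test_file = "tests/models/bert/test_modeling_bert.py"
    print(to_json(get_test_to_tester_mapping(bert_test_file)))
    print(to_json(get_model_to_tester_mapping(bert_test_file)))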
| 341
| 1
|
import math


def perfect_square(num: int) -> bool:
    """
    Check if a number is a perfect square using math.sqrt.
    Note: relies on floating-point equality, which is exact for true squares.
    """
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """
    Check if a number is a perfect square using binary search, avoiding floats.
    """
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
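    # Illustrative check (added, not part of the original module): both implementations
    # agree on perfect squares; the binary-search variant also sidesteps floating-point
    # rounding for large inputs.
    assert perfect_square(36) and perfect_square_binary_search(36)
    assert not perfect_square_binary_search(35)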
| 109
|
"""simple docstring"""
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    """Return True if there is a path from source `s` to sink `t` in the residual graph."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def mincut(graph, source, sink):
    """Return the edges of a minimum cut found via the Ford-Fulkerson method."""
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
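    # Added note (illustrative, not in the original file): `mincut` mutates `graph` in
    # place while running Ford-Fulkerson, so the call above has already reduced
    # `test_graph` to its residual capacities; pass `[row[:] for row in test_graph]`
    # when the original capacity matrix must be preserved.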
| 617
| 0
|
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import numpy
    import tensorflow as tf

    from transformers import (
        TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
        BertConfig,
        DPRConfig,
        TFDPRContextEncoder,
        TFDPRQuestionEncoder,
        TFDPRReader,
    )


class TFDPRModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        projection_dim=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict


@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
| 708
|
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import LevitImageProcessor


class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 350
| 0
|
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    r"""
    Configuration class to store the configuration of a RoBERTa model.
    """

    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
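

# Minimal usage sketch (illustrative addition, not part of the original module):
# instantiating the config with no arguments reproduces the `roberta-base`
# hyperparameters shown in the defaults above.
#
#     from transformers import RobertaConfig
#
#     config = RobertaConfig()
#     assert config.hidden_size == 768 and config.num_hidden_layers == 12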
| 435
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}


class DPRConfig(PretrainedConfig):
    r"""
    [`DPRConfig`] is the configuration class to store the configuration of a *DPRModel*.
    """

    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
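

# Usage sketch (illustrative addition, not part of the original module): DPRConfig
# mirrors the BERT hyperparameters plus `projection_dim`; 0 means the pooled encoder
# output is used directly, while a positive value adds a projection of that size.
#
#     from transformers import DPRConfig
#
#     config = DPRConfig(projection_dim=128)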
| 695
| 0
|
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "

HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"

TO_HIGHLIGHT = [
    "TextEncoderConfig",
    "ByteTextEncoder",
    "SubwordTextEncoder",
    "encoder_config",
    "maybe_build_from_corpus",
    "manual_dir",
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r"tfds\.core", r"datasets"),
    (r"tf\.io\.gfile\.GFile", r"open"),
    (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.features\.FeaturesDict\(", r"dict("),
    (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
    (r"tfds\.", r"datasets."),
    (r"dl_manager\.manual_dir", r"self.config.data_dir"),
    (r"self\.builder_config", r"self.config"),
]


def convert_command_factory(args: Namespace):
    """
    Factory function used to convert a TensorFlow Datasets dataset into a HuggingFace Datasets dataset.

    Returns: ConvertCommand
    """
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the datasets-cli.
        """
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")

        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")

            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
| 702
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 328
| 0
|
"""simple docstring"""
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """
    Treats the curve as a collection of linear segments and sums the areas of the
    trapezium shapes they form with the x axis.
    """
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
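    # Added note (illustrative, not in the original file): because each trapezoid's
    # signed area passes through `abs(...)`, the routine approximates the unsigned
    # area between the curve and the x axis, so the printed values should converge
    # toward a single limit as the step count grows rather than oscillate.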
| 516
|
"""simple docstring"""
from heapq import heappop, heappush

import numpy as np


def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float, list[tuple[int, int]]]:
    """Return the shortest distance and path between two grid cells, where cells
    containing 1 are walkable and every move costs 1."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
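    # Usage sketch (illustrative addition, not part of the original module): cells
    # equal to 1 are walkable, so on a 2x2 grid of ones the corner-to-corner distance
    # is 2 without diagonals and 1 with them.
    demo_grid = np.array([[1, 1], [1, 1]])
    print(dijkstra(demo_grid, (0, 0), (1, 1), allow_diagonal=False))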
| 516
| 1
|
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
| 345
|
import os


def solution():
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum


if __name__ == "__main__":
    print(solution())
| 345
| 1
|
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset

from utils import logger


class LmSeqsDataset(Dataset):
    """Custom Dataset wrapping language modeling sequences.

    Each sample is retrieved by indexing the list of token_ids and their corresponding lengths.
    """

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """
        Some sanity checks.
        """
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """
        Sequences that are too long are split by chunks of max_model_input_size.
        """
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """
        Too short sequences are simply removed. This could be tuned.
        """
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """
        Remove sequences with a (too) high level of unknown tokens.
        """
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """
        Print some statistics on the corpus. Only the master process.
        """
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')

        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """
        Do the padding and transform into torch.tensor.
        """
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
| 510
|
"""simple docstring"""
from pathlib import Path
from typing import List

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image


authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types


@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
| 510
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
snake_case__ : Optional[int] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase , '''tf_padding''' ) )
self.parent.assertTrue(hasattr(lowerCamelCase , '''depth_multiplier''' ) )
class snake_case :
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=3 , lowerCamelCase=32 , lowerCamelCase=0.25 , lowerCamelCase=8 , lowerCamelCase=8 , lowerCamelCase=6 , lowerCamelCase=32 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase="relu6" , lowerCamelCase=1280 , lowerCamelCase=0.1 , lowerCamelCase=0.02 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=10 , lowerCamelCase=None , ) -> Optional[int]:
"""simple docstring"""
snake_case__ : List[str] = parent
snake_case__ : Dict = batch_size
snake_case__ : int = num_channels
snake_case__ : Optional[int] = image_size
snake_case__ : Union[str, Any] = depth_multiplier
snake_case__ : Union[str, Any] = depth_divisible_by
snake_case__ : Tuple = min_depth
snake_case__ : Optional[int] = expand_ratio
snake_case__ : Dict = tf_padding
snake_case__ : int = output_stride
snake_case__ : Dict = first_layer_is_expansion
snake_case__ : List[str] = finegrained_output
snake_case__ : List[Any] = hidden_act
snake_case__ : Optional[int] = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
snake_case__ : Union[str, Any] = classifier_dropout_prob
snake_case__ : Any = use_labels
snake_case__ : Any = is_training
snake_case__ : List[str] = num_labels
snake_case__ : Tuple = initializer_range
snake_case__ : Optional[int] = scope
def lowercase__ ( self ) -> Any:
"""simple docstring"""
snake_case__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : Optional[Any] = None
snake_case__ : Union[str, Any] = None
if self.use_labels:
snake_case__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowercase__ ( self ) -> int:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.last_hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
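# Quick illustration of the shape arithmetic exercised by the checks above
# (values match the tester defaults; the helper itself is only a sketch):
# the backbone downsamples by output_stride, so with image_size=32 and
# output_stride=32 the final feature map is a single pixel per side, and the
# base model's last_hidden_state has shape (batch_size, last_hidden_size, 1, 1).
def _expected_feature_map_side(image_size: int = 32, output_stride: int = 32) -> int:
    return image_size // output_stride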
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': MobileNetVaModel,
'image-classification': MobileNetVaForImageClassification,
'image-segmentation': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''' )
    def test_inputs_embeds(self):
        pass
@unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''' )
    def test_model_common_attributes(self):
        pass
@unittest.skip(reason='''MobileNetV2 does not output attentions''' )
    def test_attention_outputs(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
    def default_image_processor(self):
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''' ) if is_vision_available() else None
)
@slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''')
        model = model.to(torch_device)

        image_processor = MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''')

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
| 694
|
'''simple docstring'''
import socket
def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b'''Hello server!''')

    with open('''Received_file''', '''wb''') as out_file:
        print('''File opened''')
        print('''Receiving data...''')
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print('''Successfully received the file''')
    sock.close()
    print('''Connection closed''')
if __name__ == "__main__":
main()
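# For context, a minimal sketch of the matching server side. The file name,
# port, and single-client handling are illustrative assumptions, not part of
# the original script; they simply mirror what the client above expects.
def serve_file(filename: str = "File_to_send", port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _addr = server.accept()
    print(conn.recv(1024))  # consume the client's greeting
    with open(filename, "rb") as in_file:
        while True:
            chunk = in_file.read(1024)
            if not chunk:
                break
            conn.send(chunk)
    conn.close()  # closing the connection signals EOF to the client's recv loop
    server.close()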
| 694
| 1
|
'''simple docstring'''
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
__SCREAMING_SNAKE_CASE : List[str] = XCLIPTextConfig()
# derive patch size from model name
__SCREAMING_SNAKE_CASE : Dict = model_name.find('''patch''' )
__SCREAMING_SNAKE_CASE : List[str] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] )
__SCREAMING_SNAKE_CASE : int = XCLIPVisionConfig(patch_size=__SCREAMING_SNAKE_CASE , num_frames=__SCREAMING_SNAKE_CASE )
if "large" in model_name:
__SCREAMING_SNAKE_CASE : Union[str, Any] = 7_68
__SCREAMING_SNAKE_CASE : int = 30_72
__SCREAMING_SNAKE_CASE : Tuple = 12
__SCREAMING_SNAKE_CASE : int = 10_24
__SCREAMING_SNAKE_CASE : List[str] = 40_96
__SCREAMING_SNAKE_CASE : List[str] = 16
__SCREAMING_SNAKE_CASE : str = 24
__SCREAMING_SNAKE_CASE : Tuple = 7_68
__SCREAMING_SNAKE_CASE : List[Any] = 30_72
if model_name == "xclip-large-patch14-16-frames":
__SCREAMING_SNAKE_CASE : Any = 3_36
__SCREAMING_SNAKE_CASE : Union[str, Any] = XCLIPConfig.from_text_vision_configs(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if "large" in model_name:
__SCREAMING_SNAKE_CASE : Optional[int] = 7_68
return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace('''token_embedding.weight''', '''text_model.embeddings.token_embedding.weight''')
    if name == "positional_embedding":
        name = name.replace('''positional_embedding''', '''text_model.embeddings.position_embedding.weight''')
    if "ln_1" in name:
        name = name.replace('''ln_1''', '''layer_norm1''')
    if "ln_2" in name:
        name = name.replace('''ln_2''', '''layer_norm2''')
    if "c_fc" in name:
        name = name.replace('''c_fc''', '''fc1''')
    if "c_proj" in name:
        name = name.replace('''c_proj''', '''fc2''')
    if name.startswith('''transformer.resblocks'''):
        name = name.replace('''transformer.resblocks''', '''text_model.encoder.layers''')
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace('''attn.out_proj''', '''self_attn.out_proj''')
    if "ln_final" in name:
        name = name.replace('''ln_final''', '''text_model.final_layer_norm''')
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace('''visual.class_embedding''', '''vision_model.embeddings.class_embedding''')
    if name == "visual.positional_embedding":
        name = name.replace('''visual.positional_embedding''', '''vision_model.embeddings.position_embedding.weight''')
    if name.startswith('''visual.transformer.resblocks'''):
        name = name.replace('''visual.transformer.resblocks''', '''vision_model.encoder.layers''')
    if "visual.conv1" in name:
        name = name.replace('''visual.conv1''', '''vision_model.embeddings.patch_embedding''')
    if "visual.ln_pre" in name:
        name = name.replace('''visual.ln_pre''', '''vision_model.pre_layernorm''')
    if "visual.ln_post" in name:
        name = name.replace('''visual.ln_post''', '''vision_model.post_layernorm''')
    if "visual.proj" in name:
        name = name.replace('''visual.proj''', '''visual_projection.weight''')
    if "text_projection" in name:
        name = name.replace('''text_projection''', '''text_projection.weight''')
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace('''prompts_visual_proj''', '''prompts_visual_projection''')
    if "prompts_visual_ln" in name:
        name = name.replace('''prompts_visual_ln''', '''prompts_visual_layernorm''')
    # mit
    if name == "mit.positional_embedding":
        name = name.replace('''positional''', '''position''')
    if name.startswith('''mit.resblocks'''):
        name = name.replace('''mit.resblocks''', '''mit.encoder.layers''')
    # prompts generator
    if name.startswith('''prompts_generator.norm'''):
        name = name.replace('''prompts_generator.norm''', '''prompts_generator.layernorm''')
return name
def convert_state_dict(orig_state_dict, config):
for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
if "attn.in_proj" in key:
            key_split = key.split('''.''')
if key.startswith('''visual''' ):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__SCREAMING_SNAKE_CASE : List[str] = val[
:dim, :
]
__SCREAMING_SNAKE_CASE : int = val[
dim : dim * 2, :
]
__SCREAMING_SNAKE_CASE : Optional[int] = val[
-dim:, :
]
else:
__SCREAMING_SNAKE_CASE : Any = val[
:dim
]
__SCREAMING_SNAKE_CASE : Optional[int] = val[
dim : dim * 2
]
__SCREAMING_SNAKE_CASE : Any = val[
-dim:
]
else:
if "weight" in key:
__SCREAMING_SNAKE_CASE : Any = val[
:dim, :
]
__SCREAMING_SNAKE_CASE : Dict = val[
dim : dim * 2, :
]
__SCREAMING_SNAKE_CASE : int = val[
-dim:, :
]
else:
__SCREAMING_SNAKE_CASE : int = val[:dim]
__SCREAMING_SNAKE_CASE : Optional[int] = val[
dim : dim * 2
]
__SCREAMING_SNAKE_CASE : str = val[-dim:]
elif key.startswith('''mit''' ):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
if "weight" in key:
__SCREAMING_SNAKE_CASE : List[Any] = val[:dim, :]
__SCREAMING_SNAKE_CASE : List[str] = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE : Any = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE : Optional[int] = val[:dim]
__SCREAMING_SNAKE_CASE : Tuple = val[dim : dim * 2]
__SCREAMING_SNAKE_CASE : int = val[-dim:]
else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
if "weight" in key:
__SCREAMING_SNAKE_CASE : int = val[:dim, :]
__SCREAMING_SNAKE_CASE : Optional[int] = val[
dim : dim * 2, :
]
__SCREAMING_SNAKE_CASE : Optional[Any] = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE : Optional[int] = val[:dim]
__SCREAMING_SNAKE_CASE : List[Any] = val[
dim : dim * 2
]
__SCREAMING_SNAKE_CASE : Union[str, Any] = val[-dim:]
else:
            new_key_name = rename_key(key)
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val
return orig_state_dict
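# The in_proj handling above relies on the usual fused-QKV layout: the first
# `dim` rows of the fused weight are the query projection, the next `dim` the
# key, and the last `dim` the value. A self-contained sketch with a synthetic
# tensor (shapes are illustrative, not taken from a real checkpoint):
def _demo_split_fused_qkv(dim: int = 4) -> None:
    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q_w = fused[:dim, :]
    k_w = fused[dim : dim * 2, :]
    v_w = fused[-dim:, :]
    # the three slices partition the fused matrix exactly
    assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), fused)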
def prepare_video(num_frames):
if num_frames == 8:
__SCREAMING_SNAKE_CASE : str = '''eating_spaghetti_8_frames.npy'''
elif num_frames == 16:
__SCREAMING_SNAKE_CASE : List[str] = '''eating_spaghetti.npy'''
elif num_frames == 32:
__SCREAMING_SNAKE_CASE : Optional[Any] = '''eating_spaghetti_32_frames.npy'''
__SCREAMING_SNAKE_CASE : Dict = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename=__SCREAMING_SNAKE_CASE , repo_type='''dataset''' , )
__SCREAMING_SNAKE_CASE : str = np.load(__SCREAMING_SNAKE_CASE )
return list(__SCREAMING_SNAKE_CASE )
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32
    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = '''pytorch_model.bin'''
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location='''cpu''')['''model''']
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)['''model''']

    state_dict = convert_state_dict(state_dict, config)
    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
    size = 336 if model_name == '''xclip-large-patch14-16-frames''' else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''')
    fast_tokenizer = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''')
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=['''playing sports''', '''eating spaghetti''', '''go shopping'''], videos=video, return_tensors='''pt''', padding=True
    )
print('''Shape of pixel values:''' , inputs.pixel_values.shape )
with torch.no_grad():
        outputs = model(**inputs)
# Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print('''Probs:''', probs)
# kinetics-400
if model_name == "xclip-base-patch32":
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
__SCREAMING_SNAKE_CASE : Any = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] )
elif model_name == "xclip-base-patch16":
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] )
elif model_name == "xclip-large-patch14":
__SCREAMING_SNAKE_CASE : str = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
__SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__SCREAMING_SNAKE_CASE : str = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
__SCREAMING_SNAKE_CASE : Any = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__SCREAMING_SNAKE_CASE : Dict = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
__SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
__SCREAMING_SNAKE_CASE : Dict = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
__SCREAMING_SNAKE_CASE : Any = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] )
else:
raise ValueError(f"""Model name {model_name} not supported""" )
assert torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print('''Pushing model, processor and slow tokenizer files to the hub...''' )
        model.push_to_hub(model_name, organization='''nielsr''')
        processor.push_to_hub(model_name, organization='''nielsr''')
        slow_tokenizer.push_to_hub(model_name, organization='''nielsr''')
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_A = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
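# Typical invocation of this conversion script (the script file name and the
# output path below are illustrative, not taken from the repository):
#   python convert_x_clip_original_pytorch_to_hf.py \
#       --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32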
| 158
|
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    return np.dot(vector, vector)
class SCREAMING_SNAKE_CASE_ :
    def __init__(self, *, regularization: float = np.inf, kernel: str = "linear", gamma: float = 0.0) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError('''rbf kernel requires gamma''')
            if not isinstance(self.gamma, (float, int)):
                raise ValueError('''gamma must be float or int''')
            if not self.gamma > 0:
                raise ValueError('''gamma must be > 0''')
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"""Unknown kernel: {kernel}"""
            raise ValueError(msg)
    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))
    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_constraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_constraint]
        ).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n
    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
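    # A tiny usage sketch (toy 1-D data; the sample points and regularization
    # constant are illustrative, not from the original file):
    #   svc = SCREAMING_SNAKE_CASE_(kernel="linear", regularization=10.0)
    #   svc.fit([np.array([1.0]), np.array([2.0]), np.array([-1.0]), np.array([-2.0])],
    #           np.array([1, 1, -1, -1]))
    #   svc.predict(np.array([1.5]))   # -> 1
    #   svc.predict(np.array([-1.5]))  # -> -1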
if __name__ == "__main__":
import doctest
doctest.testmod()
| 158
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : int = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[str] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
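# What the `_LazyModule` indirection above buys: importing the package stays
# cheap because heavy submodules load only on first attribute access. A
# minimal sketch of the same idea (a simplified stand-in, not the actual
# implementation from `transformers.utils`):
import importlib

class _LazyNamespaceSketch:
    def __init__(self, import_structure: dict):
        # e.g. {"json": ["dumps"]}: map each exported name back to its module
        self._name_to_module = {
            name: module for module, names in import_structure.items() for name in names
        }

    def __getattr__(self, name: str):
        # triggered only for attributes not yet present on the instance
        module = importlib.import_module(self._name_to_module[name])
        return getattr(module, name)

# Usage: _LazyNamespaceSketch({"json": ["dumps"]}).dumps({"a": 1}) imports
# `json` only at this point and returns '{"a": 1}'.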
| 700
|
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__lowerCAmelCase : Optional[int] = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class A ( unittest.TestCase ):
    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join('''examples''', '''by_feature'''))
        examples_path = os.path.abspath('''examples''')
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section='''main()''' if parser_only else '''training_function()''',
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = '''\n'''.join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, '''''')
                        self.assertEqual(diff, '''''')
    def test_nlp_examples(self):
        self.one_complete_example('''complete_nlp_example.py''', True)
        self.one_complete_example('''complete_nlp_example.py''', False)
    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join('''examples''', '''cv_example.py'''))
        special_strings = [
''' ''' * 1_6 + '''{\n\n''',
''' ''' * 2_0 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 2_0 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 2_0 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 2_0 + '''"epoch": epoch,\n\n''',
''' ''' * 1_6 + '''},\n\n''',
''' ''' * 1_6 + '''step=epoch,\n''',
''' ''' * 1_2,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
        self.one_complete_example('''complete_cv_example.py''', True, cv_path, special_strings)
        self.one_complete_example('''complete_cv_example.py''', False, cv_path, special_strings)
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class A ( UpperCAmelCase ):
a_ = False
@classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, '''default_config.yml''')
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
    def tearDownClass(cls):
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
    def test_checkpointing_by_epoch(self):
        testargs = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
    def test_checkpointing_by_steps(self):
        testargs = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
    def test_load_states_by_epoch(self):
        testargs = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}
""".split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn('''epoch 0:''', output)
        self.assertIn('''epoch 1:''', output)
    def test_load_states_by_steps(self):
        testargs = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}
""".split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn('''epoch 0:''', output)
            self.assertIn('''epoch 1:''', output)
        else:
            self.assertIn('''epoch 0:''', output)
            self.assertIn('''epoch 1:''', output)
@slow
    def test_cross_validation(self):
        testargs = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
            output = run_command(self._launch_args + testargs, return_stdout=True)
        results = re.findall('''({.+})''', output)
        results = [r for r in results if '''accuracy''' in r][-1]
        results = ast.literal_eval(results)
        self.assertGreaterEqual(results['''accuracy'''], 0.75)
    def test_multi_process_metrics(self):
        testargs = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_tracking(self):
with tempfile.TemporaryDirectory() as tmpdir:
__UpperCAmelCase = f"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
            self.assertTrue(os.path.exists(os.path.join(tmpdir, '''tracking''')))
    def test_gradient_accumulation(self):
        testargs = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
    def test_local_sgd(self):
        testargs = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 654
| 0
|
def hamming_distance(string1: str, string2: str) -> int:
    """Return the number of positions at which two equal-length strings differ."""
    if len(string1) != len(string2):
        raise ValueError("""String lengths must match!""")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
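# Quick check with the classic textbook pair (illustrative): "karolin" and
# "kathrin" differ at exactly three positions.
def _demo_hamming() -> None:
    assert hamming_distance("karolin", "kathrin") == 3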
if __name__ == "__main__":
import doctest
doctest.testmod()
| 272
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowercase = {
'''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
'''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
'''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''LayoutLMv2TokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''LayoutLMv2FeatureExtractor''']
lowercase = ['''LayoutLMv2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv2ForQuestionAnswering''',
'''LayoutLMv2ForSequenceClassification''',
'''LayoutLMv2ForTokenClassification''',
'''LayoutLMv2Layer''',
'''LayoutLMv2Model''',
'''LayoutLMv2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 272
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase : List[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Union[str, Any] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Union[str, Any] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 643
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase : str = logging.get_logger(__name__)
__UpperCAmelCase : Dict = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : Optional[int] = "yolos"
def __init__( self , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3_072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=[512, 864] , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.1 , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = hidden_size
UpperCamelCase : List[Any] = num_hidden_layers
UpperCamelCase : int = num_attention_heads
UpperCamelCase : Dict = intermediate_size
UpperCamelCase : Dict = hidden_act
UpperCamelCase : int = hidden_dropout_prob
UpperCamelCase : Any = attention_probs_dropout_prob
UpperCamelCase : Optional[Any] = initializer_range
UpperCamelCase : List[Any] = layer_norm_eps
UpperCamelCase : int = image_size
UpperCamelCase : Any = patch_size
UpperCamelCase : str = num_channels
UpperCamelCase : str = qkv_bias
UpperCamelCase : Tuple = num_detection_tokens
UpperCamelCase : List[Any] = use_mid_position_embeddings
UpperCamelCase : Dict = auxiliary_loss
# Hungarian matcher
UpperCamelCase : Optional[Any] = class_cost
UpperCamelCase : Union[str, Any] = bbox_cost
UpperCamelCase : Any = giou_cost
# Loss coefficients
UpperCamelCase : List[Any] = bbox_loss_coefficient
UpperCamelCase : Union[str, Any] = giou_loss_coefficient
UpperCamelCase : Dict = eos_coefficient
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : List[str] = version.parse("1.11")
@property
def _lowercase ( self ):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _lowercase ( self ):
"""simple docstring"""
return 1e-4
@property
def _lowercase ( self ):
"""simple docstring"""
return 12
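# How a config like the one above is typically consumed at export time: the
# `inputs` mapping feeds the exporter's dynamic axes, so batch size and image
# resolution stay flexible in the exported graph. Rough sketch (illustrative,
# not the actual transformers export code):
#   dynamic_axes = {"pixel_values": {0: "batch", 1: "num_channels", 2: "height", 3: "width"}}
#   torch.onnx.export(model, (pixel_values,), "yolos.onnx",
#                     input_names=["pixel_values"], dynamic_axes=dynamic_axes)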
| 643
| 1
|
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowerCamelCase__ = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("""tpu-config""", description=_description)
    else:
        parser = argparse.ArgumentParser("""Accelerate tpu-config command""", description=_description)
# Core arguments
    config_args = parser.add_argument_group(
        """Config Arguments""", """Arguments that can be configured through `accelerate config`."""
    )
config_args.add_argument(
"""--config_file""" , type=UpperCamelCase , default=UpperCamelCase , help="""Path to the config file to use for accelerate.""" , )
config_args.add_argument(
"""--tpu_name""" , default=UpperCamelCase , help="""The name of the TPU to use. If not specified, will use the TPU specified in the config file.""" , )
config_args.add_argument(
"""--tpu_zone""" , default=UpperCamelCase , help="""The zone of the TPU to use. If not specified, will use the zone specified in the config file.""" , )
A__ = parser.add_argument_group("""TPU Arguments""" , """Arguments for options ran inside the TPU.""" )
pod_args.add_argument(
"""--use_alpha""" , action="""store_true""" , help="""Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.""" , )
pod_args.add_argument(
"""--command_file""" , default=UpperCamelCase , help="""The path to the file containing the commands to run on the pod on startup.""" , )
pod_args.add_argument(
"""--command""" , action="""append""" , nargs="""+""" , help="""A command to run on the pod. Can be passed multiple times.""" , )
pod_args.add_argument(
"""--install_accelerate""" , action="""store_true""" , help="""Whether to install accelerate on the pod. Defaults to False.""" , )
pod_args.add_argument(
"""--accelerate_version""" , default="""latest""" , help="""The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.""" , )
pod_args.add_argument(
"""--debug""" , action="""store_true""" , help="""If set, will print the command that would be run instead of running it.""" )
if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
return parser
def tpu_command_launcher(args):
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = """git+https://github.com/huggingface/accelerate.git"""
    elif args.accelerate_version == "latest":
        args.accelerate_version = """accelerate -U"""
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"""accelerate=={args.accelerate_version}"""
if not args.command_file and not args.command:
raise ValueError("""You must specify either a command file or a command to run on the pod.""" )
    if args.command_file:
        with open(args.command_file, """r""") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["""cd /usr/share"""]
    if args.install_accelerate:
        new_cmd += [f"""pip install {args.accelerate_version}"""]
    new_cmd += args.command
    args.command = """; """.join(new_cmd)
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["""gcloud"""]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F"""Running {" ".join(UpperCamelCase )}""" )
return
    subprocess.run(cmd)
print("""Successfully setup pod.""" )
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
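# Worked example of the command assembled above. For a hypothetical invocation
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a --command "echo hi"
# the subprocess call runs roughly:
#   gcloud compute tpus tpu-vm ssh my-tpu --zone us-central1-a \
#       --command "cd /usr/share; echo hi" --worker all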
| 574
|
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)
print(F"""Loading model based on config from {config_path}...""" )
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
        layer = model.bert.encoder.layer[layer_index]
# Self-attention
        self_attn = layer.attention.self
        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, """_query_dense/kernel""", self_attn.query.weight.data.shape)
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, """_query_dense/bias""", self_attn.query.bias.data.shape)
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, """_key_dense/kernel""", self_attn.key.weight.data.shape)
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, """_key_dense/bias""", self_attn.key.bias.data.shape)
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, """_value_dense/kernel""", self_attn.value.weight.data.shape)
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, """_value_dense/bias""", self_attn.value.bias.data.shape)
# Self-attention Output
        self_output = layer.attention.output
        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, """_output_dense/kernel""", self_output.dense.weight.data.shape)
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, """_output_dense/bias""", self_output.dense.bias.data.shape)
        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, """_attention_layer_norm/gamma""")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, """_attention_layer_norm/beta""")
# Intermediate
        intermediate = layer.intermediate
        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, """_intermediate_dense/kernel""")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, """_intermediate_dense/bias""")
# Output
        bert_output = layer.output
        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, """_output_dense/kernel""")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, """_output_dense/bias""")
        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, """_output_layer_norm/gamma""")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, """_output_layer_norm/beta""")
# Embeddings
A__ = get_encoder_array("""_position_embedding_layer/embeddings""" )
A__ = get_encoder_array("""_type_embedding_layer/embeddings""" )
A__ = get_encoder_array("""_embedding_norm_layer/gamma""" )
A__ = get_encoder_array("""_embedding_norm_layer/beta""" )
# LM Head
    lm_head = model.cls.predictions.transform
    lm_head.dense.weight.data = get_masked_lm_array("""dense/kernel""")
    lm_head.dense.bias.data = get_masked_lm_array("""dense/bias""")
    lm_head.LayerNorm.weight.data = get_masked_lm_array("""layer_norm/gamma""")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("""layer_norm/beta""")
    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("""embedding_table""")
# Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("""_pooler_layer/kernel""")
    model.bert.pooler.dense.bias.data = get_encoder_array("""_pooler_layer/bias""")
# Export final model
    model.save_pretrained(pytorch_dump_path)
# Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
print(new_model.eval() )
print("""Model conversion was done sucessfully!""" )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
lowerCamelCase__ = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
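# The `array.transpose()` applied to "kernel" variables above reflects a
# layout difference: TF dense kernels are stored as (in_features, out_features)
# while torch.nn.Linear.weight is (out_features, in_features). Minimal sketch
# (synthetic shapes, for illustration only):
def _demo_kernel_transpose() -> None:
    import numpy as np

    tf_kernel = np.zeros((768, 3072))  # (in, out) as stored by TF
    pt_weight = torch.from_numpy(tf_kernel.transpose())  # (out, in) for torch
    assert pt_weight.shape == (3072, 768)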
| 574
| 1
|
"""simple docstring"""
def encrypt(input_string: str, key: int) -> str:
    """Shuffle the character order using the rail fence (zigzag) cipher."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Recover the plaintext by rebuilding the zigzag template."""
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Decrypt with every possible key and return all candidates."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
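
# A minimal round-trip check of the functions above (the sample string is
# illustrative, not part of the original module):
if __name__ == "__main__":
    sample = "Hello, World!"
    ciphertext = encrypt(sample, 4)
    assert decrypt(ciphertext, 4) == sample
    # brute force recovers the plaintext at the true key
    assert bruteforce(ciphertext)[4] == sample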
"""simple docstring"""
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count the reversible numbers of the given length, filling in digits
    from the outside in while tracking the parity of the running remainder.
    """
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    """Count all reversible numbers with at most max_power digits."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result
if __name__ == "__main__":
print(F'{solution() = }')
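
# Sanity check (Project Euler 145 states there are exactly 120 reversible
# numbers below one thousand):
if __name__ == "__main__":
    assert solution(3) == 120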
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
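
# A minimal standalone invocation sketch (normally this command is wired into
# the `diffusers-cli` argument parser via `register_subcommand`):
if __name__ == "__main__":
    EnvironmentCommand().run()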
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """
    Copy/paste/tweak the old ProphetNet checkpoint's weights into the current structure.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"

                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
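
# Example invocation (script filename and paths are placeholders):
#
#   python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#       --prophetnet_checkpoint_path /path/to/old_prophetnet_checkpoint \
#       --pytorch_dump_folder_path /path/to/output_dir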
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    r"""
    Pipeline for text-guided inpainting: CLIPSeg segments the region described
    by `text`, then Stable Diffusion inpaints it according to `prompt`.
    """

    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
'''to update the config accordingly as leaving `steps_offset` might led to incorrect results'''
''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
''' file'''
)
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
''' Hub, it would be very nice if you could open a Pull request for the'''
''' `scheduler/scheduler_config.json` file'''
)
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        r"""Returns the device on which the pipeline's models will be executed."""
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
for module in self.unet.modules():
if (
                hasattr(module, "_hf_hook")
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # Segment the region described by `text` with CLIPSeg
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
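
# A minimal usage sketch (model IDs and file names are illustrative; the
# pipeline is normally assembled from pretrained components):
#
#   from diffusers import DiffusionPipeline
#   import PIL.Image
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-inpainting",
#       custom_pipeline="text_inpainting",
#       segmentation_model=model,          # a CLIPSegForImageSegmentation
#       segmentation_processor=processor,  # a CLIPSegProcessor
#   )
#   image = PIL.Image.open("photo.png").convert("RGB").resize((512, 512))
#   result = pipe(prompt="a sunflower", image=image, text="the cup").images[0]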
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = """
Human: <<task>>

Assistant: """


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """
    Downloads and caches the prompt from a repo and returns its contents (if necessary).
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
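
# A small sanity sketch (the literal task string is illustrative): a prompt
# containing whitespace is returned unchanged, while a bare repo ID would be
# resolved against the Hub via `cached_file`.
if __name__ == "__main__":
    assert download_prompt("Answer the <<task>>", agent_name="demo-agent") == "Answer the <<task>>"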
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
_DESCRIPTION = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
_KWARGS_DESCRIPTION = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds, labels):
    """
    Computes F1 score and Exact Match for MultiRC predictions.
    """
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
"""simple docstring"""
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
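
# A tiny direct check of the MultiRC helper above (toy inputs, mirroring the
# docstring example; illustrative only):
if __name__ == "__main__":
    ids_preds = [
        {"idx": {"answer": 0, "paragraph": 0, "question": 0}, "prediction": 0},
        {"idx": {"answer": 1, "paragraph": 2, "question": 3}, "prediction": 1},
    ]
    print(evaluate_multirc(ids_preds, [0, 1]))  # {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}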
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet
    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }

        return inputs
    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs
    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if isinstance(device, str):
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents
    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
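
# A minimal end-user sketch mirroring the slow tests above (requires a GPU and
# the public "diffusers/consistency_models" weights; illustrative only):
#
#   unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
#   scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
#   pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler).to("cuda")
#   image = pipe(num_inference_steps=None, timesteps=[22, 0], class_labels=0).images[0]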
'''simple docstring'''
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    """Encodes data according to RFC4648."""
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    """Decodes data according to RFC4648."""
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)
if __name__ == "__main__":
import doctest
doctest.testmod()
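
# Cross-check against the standard library (payload string is illustrative):
if __name__ == "__main__":
    import base64

    payload = b"Hello, World!"
    assert base64_encode(payload) == base64.b64encode(payload)
    assert base64_decode(base64.b64encode(payload).decode()) == payload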
from math import factorial
def solution(num: int = 100) -> int:
    """Returns the sum of the digits in the number num!"""
    return sum(map(int, str(factorial(num))))
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
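
# Quick sanity check: 10! = 3628800, whose digits sum to 27.
if __name__ == "__main__":
    assert solution(10) == 27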
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers
            # require special treatment: we need to split them up into separate matrices/vectors.
            # NOTE: the destination key strings below are reconstructed to follow the renaming
            # scheme of rename_key above; q/k/v are assumed to be packed in that order.
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers
            # require special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    """
    Copy/paste/tweak the GroupViT checkpoint's weights into the Transformers design.
    """
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
        default="groupvit-gcc-yfcc",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
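
# Example invocation (script filename and paths are placeholders):
#
#   python convert_groupvit_nvlab_to_hf.py \
#       --checkpoint_path /path/to/groupvit_checkpoint.pth \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --model_name groupvit-gcc-yfcc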
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Finds a root of `func` starting from the point 'a' by the Newton-Raphson method.
    The expression in `func` must use the variable name `x`.
    """
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
print(f'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}')
    # Find value of e (the root of log(x) - 1 = 0)
print(f'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}')
# Exponential Roots
print(f'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges; adding the weight is optional and
    # repetition is handled
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)
    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
    # if no destination is meant, the default value of d is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited
    # if c is -1 (the default), the node count is random between 10 and 10010
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)
    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited
    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])
    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes
    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)
    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False
    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
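
# Usage sketch for the directed graph above (illustrative values):
#
#   >>> g = DirectedGraph()
#   >>> g.add_pair(0, 1)
#   >>> g.add_pair(1, 2)
#   >>> g.add_pair(2, 0)
#   >>> g.all_nodes()
#   [0, 1, 2]
#   >>> g.has_cycle()
#   True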
class Graph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges; adding the weight is optional and
    # repetition is handled
    def add_pair(self, u, v, w=1):
        # check if u exists
        if self.graph.get(u):
            # if there already is an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is an edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)
def a_ ( self : Any , _lowerCamelCase : str=-2 , _lowerCamelCase : int=-1 ):
"""simple docstring"""
if s == d:
return []
A_ : Optional[Any] = []
A_ : List[Any] = []
if s == -2:
A_ : Optional[int] = list(self.graph )[0]
stack.append(_lowerCamelCase )
visited.append(_lowerCamelCase )
A_ : Union[str, Any] = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A_ : Any = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_lowerCamelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A_ : int = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_lowerCamelCase ) != 0:
A_ : List[Any] = stack[len(_lowerCamelCase ) - 1]
else:
A_ : str = ss
            # check if we have reached the starting point
if len(_lowerCamelCase ) == 0:
return visited
def a_ ( self : Optional[Any] , _lowerCamelCase : Union[str, Any]=-1 ):
"""simple docstring"""
if c == -1:
A_ : List[Any] = floor(random() * 1_00_00 ) + 10
for i in range(_lowerCamelCase ):
            # every vertex gets a random number of edges (up to 102)
for _ in range(floor(random() * 1_02 ) + 1 ):
A_ : List[str] = floor(random() * c ) + 1
if n != i:
self.add_pair(_lowerCamelCase , _lowerCamelCase , 1 )
def a_ ( self : Dict , _lowerCamelCase : List[Any]=-2 ):
"""simple docstring"""
A_ : Dict = deque()
A_ : Tuple = []
if s == -2:
A_ : List[str] = list(self.graph )[0]
d.append(_lowerCamelCase )
visited.append(_lowerCamelCase )
while d:
A_ : str = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def a_ ( self : Any , _lowerCamelCase : int ):
"""simple docstring"""
return len(self.graph[u] )
def a_ ( self : Any ):
"""simple docstring"""
A_ : Dict = []
A_ : Optional[int] = []
A_ : Union[str, Any] = list(self.graph )[0]
stack.append(_lowerCamelCase )
visited.append(_lowerCamelCase )
A_ : str = -2
A_ : int = []
A_ : Optional[Any] = s
A_ : str = False
A_ : int = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A_ : Tuple = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A_ : Tuple = len(_lowerCamelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A_ : Union[str, Any] = True
if len(_lowerCamelCase ) != 0:
A_ : Tuple = stack[len(_lowerCamelCase ) - 1]
else:
A_ : str = False
indirect_parents.append(_lowerCamelCase )
A_ : Union[str, Any] = s
A_ : List[str] = ss
            # check if we have reached the starting point
if len(_lowerCamelCase ) == 0:
return list(_lowerCamelCase )
def a_ ( self : Union[str, Any] ):
"""simple docstring"""
A_ : List[str] = []
A_ : int = []
A_ : List[Any] = list(self.graph )[0]
stack.append(_lowerCamelCase )
visited.append(_lowerCamelCase )
A_ : int = -2
A_ : Tuple = []
A_ : Any = s
A_ : Tuple = False
A_ : Union[str, Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A_ : Any = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
                        len_stack_minus_one = len(stack ) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                # node[1] is still on the current DFS stack,
                                # so this back edge closes a cycle
                                return True
                            len_stack_minus_one -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A_ : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A_ : Optional[int] = True
if len(_lowerCamelCase ) != 0:
A_ : str = stack[len(_lowerCamelCase ) - 1]
else:
A_ : List[Any] = False
indirect_parents.append(_lowerCamelCase )
A_ : List[Any] = s
A_ : List[Any] = ss
            # check if we have reached the starting point
if len(_lowerCamelCase ) == 0:
return False
def a_ ( self : Union[str, Any] ):
"""simple docstring"""
return list(self.graph )
def a_ ( self : Tuple , _lowerCamelCase : Union[str, Any]=-2 , _lowerCamelCase : str=-1 ):
"""simple docstring"""
A_ : Optional[int] = time()
self.dfs(_lowerCamelCase , _lowerCamelCase )
A_ : Union[str, Any] = time()
return end - begin
def a_ ( self : Tuple , _lowerCamelCase : str=-2 ):
"""simple docstring"""
A_ : Optional[int] = time()
self.bfs(_lowerCamelCase )
A_ : List[str] = time()
return end - begin
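# The classes above store an undirected graph as an adjacency list whose
# edges are [weight, vertex] pairs. A self-contained sketch of the same
# scheme with a breadth-first traversal, mirroring the bfs method above
# (the values and vertex labels are illustrative):
from collections import deque

adjacency = {0: [[5, 1], [1, 2]], 1: [[5, 0], [3, 2]], 2: [[3, 1], [1, 0]]}
order = [0]
queue = deque([0] )
while queue:
    u = queue.popleft()
    for w , v in adjacency[u]:
        if v not in order:
            order.append(v )
            queue.append(v )
assert order == [0, 1, 2]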
| 361
| 1
|
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase_ ( SchedulerCommonTest ):
'''simple docstring'''
UpperCAmelCase : Dict = (DPMSolverSinglestepScheduler,)
UpperCAmelCase : str = (('''num_inference_steps''', 25),)
    def get_scheduler_config( self , **kwargs ):
        config = {
'num_train_timesteps': 1_000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
'prediction_type': 'epsilon',
'thresholding': False,
'sample_max_value': 1.0,
'algorithm_type': 'dpmsolver++',
'solver_type': 'midpoint',
'lambda_min_clipped': -float('inf' ),
'variance_type': None,
}
        config.update(**kwargs )
return config
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Optional[Any]=0 , **_UpperCAmelCase : Optional[Any] ):
_A = dict(self.forward_default_kwargs )
_A = kwargs.pop('num_inference_steps' , _UpperCAmelCase )
_A = self.dummy_sample
_A = 0.1 * sample
_A = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_A = self.get_scheduler_config(**_UpperCAmelCase )
_A = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
_A = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
_A = scheduler_class.from_pretrained(_UpperCAmelCase )
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
_A = dummy_past_residuals[: new_scheduler.config.solver_order]
_A , _A = sample, sample
for t in range(_UpperCAmelCase , time_step + scheduler.config.solver_order + 1 ):
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
_A = new_scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCAmelCase_ ( self : Dict ):
pass
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : List[Any]=0 , **_UpperCAmelCase : Union[str, Any] ):
_A = dict(self.forward_default_kwargs )
_A = kwargs.pop('num_inference_steps' , _UpperCAmelCase )
_A = self.dummy_sample
_A = 0.1 * sample
_A = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_A = self.get_scheduler_config()
_A = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
_A = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
_A = scheduler_class.from_pretrained(_UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
_A = dummy_past_residuals[: new_scheduler.config.solver_order]
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
_A = new_scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop( self , scheduler=None , **config ):
        # build a default scheduler only when none is supplied
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
def lowerCAmelCase_ ( self : List[str] ):
_A = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
_A = 50
_A = self.dummy_model()
_A = self.dummy_sample_deter
scheduler.set_timesteps(_UpperCAmelCase )
        # make sure that the first t is odd
for i, t in enumerate(scheduler.timesteps[3:] ):
_A = model(_UpperCAmelCase , _UpperCAmelCase )
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample
_A = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2574 ) < 1E-3
def lowerCAmelCase_ ( self : int ):
for timesteps in [25, 50, 100, 999, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps )
def lowerCAmelCase_ ( self : int ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_A = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
_A = self.full_loop(scheduler=_UpperCAmelCase )
_A = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
_A = DEISMultistepScheduler.from_config(scheduler.config )
_A = DPMSolverMultistepScheduler.from_config(scheduler.config )
_A = UniPCMultistepScheduler.from_config(scheduler.config )
_A = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_A = self.full_loop(scheduler=_UpperCAmelCase )
_A = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def lowerCAmelCase_ ( self : str ):
        self.check_over_configs(thresholding=False )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
                        thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , algorithm_type='dpmsolver++' , solver_order=order , solver_type=solver_type , )
def lowerCAmelCase_ ( self : Union[str, Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
def lowerCAmelCase_ ( self : Any ):
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
                        solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
_A = self.full_loop(
                    solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
assert not torch.isnan(_UpperCAmelCase ).any(), "Samples have nan numbers"
def lowerCAmelCase_ ( self : Optional[Any] ):
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
def lowerCAmelCase_ ( self : List[Any] ):
self.check_over_configs(lambda_min_clipped=-float('inf' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def lowerCAmelCase_ ( self : Optional[Any] ):
        self.check_over_configs(variance_type=None )
self.check_over_configs(variance_type='learned_range' )
def lowerCAmelCase_ ( self : List[str] ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )
def lowerCAmelCase_ ( self : str ):
_A = self.full_loop()
_A = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def lowerCAmelCase_ ( self : int ):
        _A = self.full_loop(use_karras_sigmas=True )
_A = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_mean.item() - 0.2248 ) < 1E-3
def lowerCAmelCase_ ( self : str ):
_A = self.full_loop(prediction_type='v_prediction' )
_A = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_mean.item() - 0.1453 ) < 1E-3
def lowerCAmelCase_ ( self : Union[str, Any] ):
        _A = self.full_loop(prediction_type='v_prediction' , use_karras_sigmas=True )
_A = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_mean.item() - 0.0649 ) < 1E-3
def lowerCAmelCase_ ( self : List[Any] ):
_A = self.scheduler_classes[0]
        _A = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
_A = scheduler_class(**_UpperCAmelCase )
_A = 10
_A = self.dummy_model()
_A = self.dummy_sample_deter.half()
scheduler.set_timesteps(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
_A = model(_UpperCAmelCase , _UpperCAmelCase )
_A = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample
        assert sample.dtype == torch.float16
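# A standalone sketch of the config save/load round-trip the tests above
# exercise, using only public diffusers APIs; the settings are illustrative.
def _roundtrip_sketch():
    sched = DPMSolverSinglestepScheduler(num_train_timesteps=1_000 , solver_order=2 )
    with tempfile.TemporaryDirectory() as tmpdirname:
        sched.save_config(tmpdirname )
        reloaded = DPMSolverSinglestepScheduler.from_pretrained(tmpdirname )
    # both schedulers should produce the same timestep schedule
    sched.set_timesteps(25 )
    reloaded.set_timesteps(25 )
    assert sched.timesteps.tolist() == reloaded.timesteps.tolist()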
| 7
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_lxmert""": ["""LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LxmertConfig"""],
"""tokenization_lxmert""": ["""LxmertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["""LxmertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
"""LxmertEncoder""",
"""LxmertForPreTraining""",
"""LxmertForQuestionAnswering""",
"""LxmertModel""",
"""LxmertPreTrainedModel""",
"""LxmertVisualFeatureEncoder""",
"""LxmertXLayer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
"""TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLxmertForPreTraining""",
"""TFLxmertMainLayer""",
"""TFLxmertModel""",
"""TFLxmertPreTrainedModel""",
"""TFLxmertVisualFeatureEncoder""",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
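# What the _LazyModule registration above buys: importing the package is
# cheap, and the heavy torch/TF submodules load only on first attribute
# access. A minimal sketch:
import transformers.models.lxmert as lxmert  # fast: nothing heavy imported yet

config = lxmert.LxmertConfig()  # first access triggers the real submodule import
assert type(config ).__name__ == "LxmertConfig"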
| 218
| 0
|
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class __a ( nn.Module ):
__UpperCamelCase : int
__UpperCamelCase : int
__UpperCamelCase : float = 0.0
__UpperCamelCase : int = 1
__UpperCamelCase : int = 1
__UpperCamelCase : bool = True
__UpperCamelCase : bool = False
__UpperCamelCase : bool = False
__UpperCamelCase : bool = False
__UpperCamelCase : jnp.dtype = jnp.floataa
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
for i in range(self.num_layers ):
__SCREAMING_SNAKE_CASE = self.in_channels if i == 0 else self.out_channels
__SCREAMING_SNAKE_CASE = FlaxResnetBlockaD(
in_channels=lowerCamelCase ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase )
__SCREAMING_SNAKE_CASE = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase )
__SCREAMING_SNAKE_CASE = resnets
__SCREAMING_SNAKE_CASE = attentions
if self.add_downsample:
__SCREAMING_SNAKE_CASE = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self : Any ,lowerCamelCase : Dict ,lowerCamelCase : List[Any] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : Tuple=True ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ()
for resnet, attn in zip(self.resnets ,self.attentions ):
__SCREAMING_SNAKE_CASE = resnet(lowerCamelCase ,lowerCamelCase ,deterministic=lowerCamelCase )
__SCREAMING_SNAKE_CASE = attn(lowerCamelCase ,lowerCamelCase ,deterministic=lowerCamelCase )
output_states += (hidden_states,)
if self.add_downsample:
__SCREAMING_SNAKE_CASE = self.downsamplers_a(lowerCamelCase )
output_states += (hidden_states,)
return hidden_states, output_states
class __a ( nn.Module ):
__UpperCamelCase : int
__UpperCamelCase : int
__UpperCamelCase : float = 0.0
__UpperCamelCase : int = 1
__UpperCamelCase : bool = True
__UpperCamelCase : jnp.dtype = jnp.floataa
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
for i in range(self.num_layers ):
__SCREAMING_SNAKE_CASE = self.in_channels if i == 0 else self.out_channels
__SCREAMING_SNAKE_CASE = FlaxResnetBlockaD(
in_channels=lowerCamelCase ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase )
__SCREAMING_SNAKE_CASE = resnets
if self.add_downsample:
__SCREAMING_SNAKE_CASE = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self : str ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Dict=True ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ()
for resnet in self.resnets:
__SCREAMING_SNAKE_CASE = resnet(lowerCamelCase ,lowerCamelCase ,deterministic=lowerCamelCase )
output_states += (hidden_states,)
if self.add_downsample:
__SCREAMING_SNAKE_CASE = self.downsamplers_a(lowerCamelCase )
output_states += (hidden_states,)
return hidden_states, output_states
class __a ( nn.Module ):
__UpperCamelCase : int
__UpperCamelCase : int
__UpperCamelCase : int
__UpperCamelCase : float = 0.0
__UpperCamelCase : int = 1
__UpperCamelCase : int = 1
__UpperCamelCase : bool = True
__UpperCamelCase : bool = False
__UpperCamelCase : bool = False
__UpperCamelCase : bool = False
__UpperCamelCase : jnp.dtype = jnp.floataa
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
for i in range(self.num_layers ):
__SCREAMING_SNAKE_CASE = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__SCREAMING_SNAKE_CASE = self.prev_output_channel if i == 0 else self.out_channels
__SCREAMING_SNAKE_CASE = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase )
__SCREAMING_SNAKE_CASE = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase )
__SCREAMING_SNAKE_CASE = resnets
__SCREAMING_SNAKE_CASE = attentions
if self.add_upsample:
__SCREAMING_SNAKE_CASE = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self : int ,lowerCamelCase : Any ,lowerCamelCase : List[Any] ,lowerCamelCase : int ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Union[str, Any]=True ):
'''simple docstring'''
for resnet, attn in zip(self.resnets ,self.attentions ):
# pop res hidden states
__SCREAMING_SNAKE_CASE = res_hidden_states_tuple[-1]
__SCREAMING_SNAKE_CASE = res_hidden_states_tuple[:-1]
__SCREAMING_SNAKE_CASE = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
__SCREAMING_SNAKE_CASE = resnet(lowerCamelCase ,lowerCamelCase ,deterministic=lowerCamelCase )
__SCREAMING_SNAKE_CASE = attn(lowerCamelCase ,lowerCamelCase ,deterministic=lowerCamelCase )
if self.add_upsample:
__SCREAMING_SNAKE_CASE = self.upsamplers_a(lowerCamelCase )
return hidden_states
class __a ( nn.Module ):
__UpperCamelCase : int
__UpperCamelCase : int
__UpperCamelCase : int
__UpperCamelCase : float = 0.0
__UpperCamelCase : int = 1
__UpperCamelCase : bool = True
__UpperCamelCase : jnp.dtype = jnp.floataa
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
for i in range(self.num_layers ):
__SCREAMING_SNAKE_CASE = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__SCREAMING_SNAKE_CASE = self.prev_output_channel if i == 0 else self.out_channels
__SCREAMING_SNAKE_CASE = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase )
__SCREAMING_SNAKE_CASE = resnets
if self.add_upsample:
__SCREAMING_SNAKE_CASE = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self : Dict ,lowerCamelCase : Tuple ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Tuple=True ):
'''simple docstring'''
for resnet in self.resnets:
# pop res hidden states
__SCREAMING_SNAKE_CASE = res_hidden_states_tuple[-1]
__SCREAMING_SNAKE_CASE = res_hidden_states_tuple[:-1]
__SCREAMING_SNAKE_CASE = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
__SCREAMING_SNAKE_CASE = resnet(lowerCamelCase ,lowerCamelCase ,deterministic=lowerCamelCase )
if self.add_upsample:
__SCREAMING_SNAKE_CASE = self.upsamplers_a(lowerCamelCase )
return hidden_states
class __a ( nn.Module ):
__UpperCamelCase : int
__UpperCamelCase : float = 0.0
__UpperCamelCase : int = 1
__UpperCamelCase : int = 1
__UpperCamelCase : bool = False
__UpperCamelCase : bool = False
__UpperCamelCase : jnp.dtype = jnp.floataa
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [
FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
]
__SCREAMING_SNAKE_CASE = []
for _ in range(self.num_layers ):
__SCREAMING_SNAKE_CASE = FlaxTransformeraDModel(
in_channels=self.in_channels ,n_heads=self.num_attention_heads ,d_head=self.in_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCamelCase )
__SCREAMING_SNAKE_CASE = FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCamelCase )
__SCREAMING_SNAKE_CASE = resnets
__SCREAMING_SNAKE_CASE = attentions
def __call__( self : List[str] ,lowerCamelCase : List[Any] ,lowerCamelCase : Any ,lowerCamelCase : Tuple ,lowerCamelCase : Any=True ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.resnets[0](lowerCamelCase ,lowerCamelCase )
for attn, resnet in zip(self.attentions ,self.resnets[1:] ):
__SCREAMING_SNAKE_CASE = attn(lowerCamelCase ,lowerCamelCase ,deterministic=lowerCamelCase )
__SCREAMING_SNAKE_CASE = resnet(lowerCamelCase ,lowerCamelCase ,deterministic=lowerCamelCase )
return hidden_states
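# A minimal init/apply sketch for the attention-free down block above (the
# second class). The dump renames every class to `__a`; FlaxDownBlock2D below
# is the assumed original diffusers name, and the import path may vary across
# diffusers versions. Shapes are illustrative; Flax UNet blocks expect
# channels-last inputs.
import jax
from diffusers.models.unet_2d_blocks_flax import FlaxDownBlock2D

block = FlaxDownBlock2D(in_channels=32 , out_channels=32 , num_layers=1 )
sample = jnp.zeros((1, 16, 16, 32) )   # (batch, height, width, channels)
temb = jnp.zeros((1, 128) )            # time embedding
params = block.init(jax.random.PRNGKey(0 ) , sample , temb )
hidden_states , output_states = block.apply(params , sample , temb )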
| 13
|
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class __a ( Pipeline ):
def __init__( self : Union[str, Any] ,**lowerCamelCase : str ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
requires_backends(self ,"""vision""" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Dict ,lowerCamelCase : Union[str, List[str], "Image", List["Image"]] ,**lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return super().__call__(lowerCamelCase ,**lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ,**lowerCamelCase : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {}
if "candidate_labels" in kwargs:
__SCREAMING_SNAKE_CASE = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
__SCREAMING_SNAKE_CASE = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Union[str, Any]=None ,lowerCamelCase : Union[str, Any]="This is a photo of {}." ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = load_image(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.image_processor(images=[image] ,return_tensors=self.framework )
__SCREAMING_SNAKE_CASE = candidate_labels
        __SCREAMING_SNAKE_CASE = [hypothesis_template.format(x ) for x in candidate_labels]
__SCREAMING_SNAKE_CASE = self.tokenizer(lowerCamelCase ,return_tensors=self.framework ,padding=lowerCamelCase )
__SCREAMING_SNAKE_CASE = [text_inputs]
return inputs
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = model_inputs.pop("""candidate_labels""" )
__SCREAMING_SNAKE_CASE = model_inputs.pop("""text_inputs""" )
        if isinstance(text_inputs[0] , UserDict ):
__SCREAMING_SNAKE_CASE = text_inputs[0]
else:
# Batching case.
__SCREAMING_SNAKE_CASE = text_inputs[0][0]
__SCREAMING_SNAKE_CASE = self.model(**lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = model_outputs.pop("""candidate_labels""" )
__SCREAMING_SNAKE_CASE = model_outputs["""logits"""][0]
if self.framework == "pt":
__SCREAMING_SNAKE_CASE = logits.softmax(dim=-1 ).squeeze(-1 )
__SCREAMING_SNAKE_CASE = probs.tolist()
if not isinstance(lowerCamelCase ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = [scores]
elif self.framework == "tf":
__SCREAMING_SNAKE_CASE = stable_softmax(lowerCamelCase ,axis=-1 )
__SCREAMING_SNAKE_CASE = probs.numpy().tolist()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
__SCREAMING_SNAKE_CASE = [
{"""score""": score, """label""": candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
]
return result
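# End-to-end usage of the pipeline implemented above, through the public
# factory. The checkpoint is illustrative; any CLIP-style checkpoint works.
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification" , model="openai/clip-vit-base-patch32" )
preds = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg" ,
    candidate_labels=["cat", "dog", "remote control"] ,
    hypothesis_template="This is a photo of {}." ,
)
print(preds )  # list of {"score": ..., "label": ...} dicts sorted by score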
| 13
| 1
|
"""simple docstring"""
def solution (n = 1000 ) ->int:
    """simple docstring"""
    a = 3
    result = 0
    while a < n:
        # a multiple of 15 already passes the 3-or-5 test, so no extra
        # correction branch is needed
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
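# Quick checks: for n=10 the multiples of 3 or 5 below 10 are 3, 5, 6 and 9,
# and 233168 is the classic Project Euler #1 answer for n=1000.
assert solution(10 ) == 23
assert solution() == 233168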
| 93
|
'''simple docstring'''
def solution ( ) -> int:
    """simple docstring"""
    return [
        a * b * (10_00 - a - b)
        for a in range(1 , 9_99 )
        for b in range(a , 9_99 )
        if (a * a + b * b == (10_00 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(f'''{solution() = }''')
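# The only Pythagorean triplet with a + b + c == 1000 is (200, 375, 425),
# so the product computed above is 200 * 375 * 425 == 31_875_000.
assert solution() == 31_875_000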
| 405
| 0
|
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __a :
@staticmethod
def UpperCAmelCase__ ( *lowerCamelCase : int ,**lowerCamelCase : List[str] ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __a ( unittest.TestCase ):
__UpperCamelCase : Tuple = MODEL_FOR_OBJECT_DETECTION_MAPPING
def UpperCAmelCase__ ( self : Optional[int] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ObjectDetectionPipeline(model=_A ,image_processor=_A )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def UpperCAmelCase__ ( self : List[str] ,lowerCamelCase : List[Any] ,lowerCamelCase : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ,threshold=0.0 )
self.assertGreater(len(_A ) ,0 )
for detected_object in outputs:
self.assertEqual(
_A ,{
"""score""": ANY(_A ),
"""label""": ANY(_A ),
"""box""": {"""xmin""": ANY(_A ), """ymin""": ANY(_A ), """xmax""": ANY(_A ), """ymax""": ANY(_A )},
} ,)
import datasets
__SCREAMING_SNAKE_CASE = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" ,"""image""" ,split="""test""" )
__SCREAMING_SNAKE_CASE = [
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
__SCREAMING_SNAKE_CASE = object_detector(_A ,threshold=0.0 )
self.assertEqual(len(_A ) ,len(_A ) )
for outputs in batch_outputs:
self.assertGreater(len(_A ) ,0 )
for detected_object in outputs:
self.assertEqual(
_A ,{
"""score""": ANY(_A ),
"""label""": ANY(_A ),
"""box""": {"""xmin""": ANY(_A ), """ymin""": ANY(_A ), """xmax""": ANY(_A ), """ymax""": ANY(_A )},
} ,)
@require_tf
@unittest.skip("""Object detection not implemented in TF""" )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
pass
@require_torch
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 'hf-internal-testing/tiny-detr-mobilenetsv3'
__SCREAMING_SNAKE_CASE = AutoModelForObjectDetection.from_pretrained(_A )
__SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(_A )
__SCREAMING_SNAKE_CASE = ObjectDetectionPipeline(model=_A ,feature_extractor=_A )
__SCREAMING_SNAKE_CASE = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ,threshold=0.0 )
self.assertEqual(
nested_simplify(_A ,decimals=4 ) ,[
{"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
] ,)
__SCREAMING_SNAKE_CASE = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] ,threshold=0.0 ,)
self.assertEqual(
nested_simplify(_A ,decimals=4 ) ,[
[
{"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
[
{"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
] ,)
@require_torch
@slow
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 'facebook/detr-resnet-50'
__SCREAMING_SNAKE_CASE = AutoModelForObjectDetection.from_pretrained(_A )
__SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(_A )
__SCREAMING_SNAKE_CASE = ObjectDetectionPipeline(model=_A ,feature_extractor=_A )
__SCREAMING_SNAKE_CASE = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(_A ,decimals=4 ) ,[
{"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] ,)
__SCREAMING_SNAKE_CASE = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(_A ,decimals=4 ) ,[
[
{"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] ,)
@require_torch
@slow
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 'facebook/detr-resnet-50'
__SCREAMING_SNAKE_CASE = pipeline("""object-detection""" ,model=_A )
__SCREAMING_SNAKE_CASE = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(_A ,decimals=4 ) ,[
{"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] ,)
__SCREAMING_SNAKE_CASE = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(_A ,decimals=4 ) ,[
[
{"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] ,)
@require_torch
@slow
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 0.9_985
__SCREAMING_SNAKE_CASE = 'facebook/detr-resnet-50'
__SCREAMING_SNAKE_CASE = pipeline("""object-detection""" ,model=_A )
__SCREAMING_SNAKE_CASE = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ,threshold=_A )
self.assertEqual(
nested_simplify(_A ,decimals=4 ) ,[
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] ,)
@require_torch
@require_pytesseract
@slow
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 'Narsil/layoutlmv3-finetuned-funsd'
__SCREAMING_SNAKE_CASE = 0.9_993
__SCREAMING_SNAKE_CASE = pipeline("""object-detection""" ,model=_A ,threshold=_A )
__SCREAMING_SNAKE_CASE = object_detector(
"""https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" )
self.assertEqual(
nested_simplify(_A ,decimals=4 ) ,[
{"""score""": 0.9_993, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
{"""score""": 0.9_993, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
] ,)
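# The same checkpoint the slow tests above use, driven through the public
# pipeline factory instead of the test harness.
from transformers import pipeline

detector = pipeline("object-detection" , model="facebook/detr-resnet-50" )
detections = detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.9 )
for detection in detections:
    print(detection["label"] , round(detection["score"] , 3 ) , detection["box"] )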
| 715
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester ( unittest.TestCase ):
def __init__( self : Optional[int] ,lowerCamelCase : str ,lowerCamelCase : List[str]=13 ,lowerCamelCase : Optional[Any]=30 ,lowerCamelCase : Dict=2 ,lowerCamelCase : List[Any]=3 ,lowerCamelCase : List[str]=True ,lowerCamelCase : str=True ,lowerCamelCase : Optional[int]=32 ,lowerCamelCase : Dict=5 ,lowerCamelCase : Optional[int]=4 ,lowerCamelCase : List[Any]=37 ,lowerCamelCase : Union[str, Any]="gelu" ,lowerCamelCase : List[Any]=0.1 ,lowerCamelCase : Any=0.1 ,lowerCamelCase : str=10 ,lowerCamelCase : Dict=0.02 ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2
__SCREAMING_SNAKE_CASE = num_patches + 1
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = ViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowerCamelCase ,initializer_range=self.initializer_range ,)
return config, pixel_values
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : int ,lowerCamelCase : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = FlaxViTModel(config=lowerCamelCase )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
__SCREAMING_SNAKE_CASE = (self.image_size, self.image_size)
__SCREAMING_SNAKE_CASE = (self.patch_size, self.patch_size)
__SCREAMING_SNAKE_CASE = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, num_patches + 1, self.hidden_size) )
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.type_sequence_label_size
__SCREAMING_SNAKE_CASE = FlaxViTForImageClassification(config=lowerCamelCase )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = FlaxViTForImageClassification(lowerCamelCase )
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class __a ( FlaxModelTesterMixin, unittest.TestCase ):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = FlaxViTModelTester(self )
        __SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=ViTConfig ,has_text_modality=False ,hidden_size=37 )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
__SCREAMING_SNAKE_CASE = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
@jax.jit
def model_jitted(lowerCamelCase : int ,**lowerCamelCase : Union[str, Any] ):
return model(pixel_values=lowerCamelCase ,**lowerCamelCase )
with self.subTest("""JIT Enabled""" ):
__SCREAMING_SNAKE_CASE = model_jitted(**lowerCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__SCREAMING_SNAKE_CASE = model_jitted(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ) ,len(lowerCamelCase ) )
for jitted_output, output in zip(lowerCamelCase ,lowerCamelCase ):
self.assertEqual(jitted_output.shape ,output.shape )
@slow
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""google/vit-base-patch16-224""" )
__SCREAMING_SNAKE_CASE = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(lowerCamelCase )
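# A minimal inference sketch with the Flax classes covered above; the dummy
# input stands in for real preprocessed pixel values.
from transformers import FlaxViTModel

vit = FlaxViTModel.from_pretrained("google/vit-base-patch16-224" )
pixel_values = np.ones((1, 3, 224, 224) , dtype=np.float32 )
outputs = vit(pixel_values=pixel_values )
print(outputs.last_hidden_state.shape )  # (1, 197, 768): 196 patches + [CLS]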
| 13
| 0
|
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
'''simple docstring'''
def __init__( self : List[Any] , A_ : Dict , A_ : Union[str, Any]=13 , A_ : List[Any]=30 , A_ : Optional[Any]=2 , A_ : List[str]=3 , A_ : List[str]=True , A_ : Dict=True , A_ : List[Any]=32 , A_ : Any=2 , A_ : Any=4 , A_ : Optional[int]=37 , A_ : Dict="gelu" , A_ : List[Any]=0.1 , A_ : Optional[int]=0.1 , A_ : Union[str, Any]=10 , A_ : Optional[Any]=0.02 , A_ : List[Any]=3 , A_ : str=None , ) -> str:
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = patch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = is_training
lowerCamelCase_ = use_labels
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCamelCase_ = (image_size // patch_size) ** 2
lowerCamelCase_ = num_patches + 1
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , )
def a__ ( self : Any , A_ : int , A_ : int , A_ : int ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = TFViTModel(config=A_ )
lowerCamelCase_ = model(A_ , training=A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
lowerCamelCase_ = self.image_size // 2
lowerCamelCase_ = pixel_values[:, :, :image_size, :image_size]
lowerCamelCase_ = model(A_ , interpolate_pos_encoding=A_ , training=A_ )
lowerCamelCase_ = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def a__ ( self : List[Any] , A_ : List[Any] , A_ : Any , A_ : Any ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = self.type_sequence_label_size
lowerCamelCase_ = TFViTForImageClassification(A_ )
lowerCamelCase_ = model(A_ , labels=A_ , training=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
lowerCamelCase_ = self.image_size // 2
lowerCamelCase_ = pixel_values[:, :, :image_size, :image_size]
lowerCamelCase_ = model(A_ , interpolate_pos_encoding=A_ , training=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase_ = 1
lowerCamelCase_ = TFViTForImageClassification(A_ )
lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
lowerCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class A( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = TFViTModelTester(self )
        lowerCamelCase_ = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason='ViT does not use inputs_embeds' )
def a__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
pass
def a__ ( self : str ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowerCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , tf.keras.layers.Layer ) )
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(A_ )
lowerCamelCase_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
@slow
def a__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = TFViTModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(A_ )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class A( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=A_ , return_tensors='tf' )
# forward pass
lowerCamelCase_ = model(**A_ )
# verify the logits
lowerCamelCase_ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
lowerCamelCase_ = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3] , A_ , atol=1E-4 )
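# Reproducing the integration check above outside the test harness; the
# image download via requests is the only piece not already in this file.
import requests

def _tf_vit_sketch():
    image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg" , stream=True ).raw )
    processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" )
    model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" )
    inputs = processor(images=image , return_tensors="tf" )
    logits = model(**inputs ).logits
    predicted = int(tf.argmax(logits , axis=-1 )[0] )
    print(model.config.id2label[predicted] )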
| 70
|
'''simple docstring'''
import math
def a__ ( num ):
    """simple docstring"""
    # float sqrt is exact enough for small perfect squares; the binary-search
    # variant below avoids floating point entirely
    return math.sqrt(num ) * math.sqrt(num ) == num
def a__ ( n ):
    """simple docstring"""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
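# Both predicates above share the name `a__` in this dump, so only the
# binary-search variant is reachable here. A tiny agreement check against
# the stdlib's integer square root:
for _n in (0, 1, 16, 26, 10**12):
    assert a__(_n ) == (math.isqrt(_n ) ** 2 == _n )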
| 627
| 0
|
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode :
    '''simple docstring'''
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def __a ( __UpperCAmelCase : TreeNode | None ) -> int:
"""simple docstring"""
if root is None:
return 0
# Validation
    def count_nodes(node : TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node : TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(__UpperCAmelCase ) != count_coins(__UpperCAmelCase ):
raise ValueError("The nodes number should be same as the number of coins" )
# Main calculation
    def get_distrib(node : TreeNode | None ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        distrib_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(distrib_moves , distrib_excess )
return get_distrib(__UpperCAmelCase )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
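# Worked example for the distributor above: a root holding 3 coins with two
# empty leaves needs one coin moved down each edge, so the answer is 2.
example_root = TreeNode(3 , TreeNode(0 ) , TreeNode(0 ) )
assert __a(example_root ) == 2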
| 253
|
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    """Parse a boolean flag from the environment, falling back to `default`."""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")
# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)
# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)
# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)
# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
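
# Illustrative usage sketch (not part of the original module): stack several
# requirement decorators onto every `test_*` method of a class at once.
#
#     @for_all_test_methods(require_torch, slow)
#     class MyIntegrationTest(unittest.TestCase):
#         def test_heavy_thing(self):
#             ...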
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
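
# Illustrative usage sketch (not part of the original module): run filesystem
# side effects inside a throwaway working directory.
#
#     with set_current_working_directory_to_temp_dir():
#         open("scratch.txt", "w").write("temporary")  # lands in the temp dir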
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
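
# Illustrative usage sketch (not part of the original module): wrap a block to
# assert on the Arrow allocations it performs.
#
#     with assert_arrow_memory_increases():
#         table = pa.table({"col": list(range(1000))})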
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
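
# Illustrative usage sketch (not part of the original module): mark flaky
# server-side failures as expected instead of failing the suite.
#
#     @xfail_if_500_502_http_error
#     def test_remote_endpoint():
#         ...  # an HTTPError starting with "500"/"502" becomes an xfail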
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result
def pytest_xdist_worker_id():
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
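
# Illustrative usage sketch (not part of the original module): each pytest-xdist
# worker ("gw0", "gw1", ...) maps to its own distributed-training port.
#
#     # on worker "gw2": pytest_xdist_worker_id() == 2
#     # get_torch_dist_unique_port() == 29500 + 2 == 29502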
| 253
| 1
|
"""simple docstring"""
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    new_dataset_path = "./local/path"
    dataset_path = extract_path_from_uri(new_dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("""compression_fs_class""" , lowercase )
def A_ ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> str:
"""simple docstring"""
UpperCAmelCase_ : Any = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_file, """bz2""": bza_file, """lz4""": lza_file}
UpperCAmelCase_ : Optional[Any] = input_paths[compression_fs_class.protocol]
if input_path is None:
UpperCAmelCase_ : Tuple = f'''for \'{compression_fs_class.protocol}\' compression protocol, '''
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowercase )
UpperCAmelCase_ : Optional[int] = fsspec.filesystem(compression_fs_class.protocol , fo=lowercase )
assert isinstance(lowercase , lowercase )
UpperCAmelCase_ : Tuple = os.path.basename(lowercase )
UpperCAmelCase_ : Optional[int] = expected_filename[: expected_filename.rindex(""".""" )]
assert fs.glob("""*""" ) == [expected_filename]
with fs.open(lowercase , """r""" , encoding="""utf-8""" ) as f, open(lowercase , encoding="""utf-8""" ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("""protocol""" , ["""zip""", """gzip"""] )
def A_ ( lowercase , lowercase , lowercase ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Dict = {"""zip""": zip_jsonl_path, """gzip""": jsonl_gz_path}
UpperCAmelCase_ : Optional[int] = compressed_file_paths[protocol]
UpperCAmelCase_ : Union[str, Any] = """dataset.jsonl"""
UpperCAmelCase_ : str = f'''{protocol}://{member_file_path}::{compressed_file_path}'''
UpperCAmelCase_ ,*UpperCAmelCase_ : List[Any] = fsspec.get_fs_token_paths(lowercase )
assert fs.isfile(lowercase )
assert not fs.isfile("""non_existing_""" + member_file_path )
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
| 470
|
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session")
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset
@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files
lowercase_ = "\\n Text data.\n Second line of data."
@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename
@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path
@pytest.fixture(scope="session")
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path
@pytest.fixture(scope="session")
def tar_file(tmp_path_factory, text_file):
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path
@pytest.fixture(scope="session")
def xz_file(tmp_path_factory):
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session")
def zip_file(tmp_path_factory, text_file):
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path
@pytest.fixture(scope="session")
def zstd_file(tmp_path_factory):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path
@pytest.fixture(scope="session")
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(lowercase , """w""" ) as f:
f.write(lowercase )
return filename
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope="session")
def dataset_dict():
    return DATA_DICT_OF_LISTS


@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory):
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path
@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path
@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope="session")
def bz2_csv_path(csv_path, tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path


@pytest.fixture(scope="session")
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
    return path


@pytest.fixture(scope="session")
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
    return path
@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
@pytest.fixture(scope="session")
def json_list_of_dicts_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def json_dict_of_lists_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path
@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path
@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path
@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def abc_file(tmp_path_factory):
    # Fixture name assumed; it writes a file with an unsupported ".abc" extension.
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path


@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path


@pytest.fixture(scope="session")
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
    return path
@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path


@pytest.fixture(scope="session")
def image_file():
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")


@pytest.fixture(scope="session")
def audio_file():
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")


@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path
@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp("data_dir")

    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)

    return data_dir
| 470
| 1
|
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        # This inversion only works deterministically, i.e. with DDIM
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample

    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
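
# Illustrative usage sketch (not part of the original module); the checkpoint
# name is an assumption:
#
#     pipe = AudioDiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")
#     output = pipe(batch_size=1)
#     image, audio = output.images[0], output.audios[0]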
| 447
|
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
    import torch
    from torch.utils.data import Dataset

    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train", tokenizer.__class__.__name__, str(max_seq_length), task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
    import tensorflow as tf

    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))
                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            # NOTE: the integer widths below are assumptions; the obfuscated dump only shows `tf.intaa`.
            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """Loads a list of ``InputExample``s into a list of ``InputFeatures``."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
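
# Illustrative usage sketch (not part of the original module); the data
# directory path is hypothetical:
#
#     tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
#     dataset = HansDataset(data_dir="./hans_data", tokenizer=tokenizer, task="hans", max_seq_length=128)
#     print(len(dataset), dataset.get_labels())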
| 447
| 1
|
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''roberta-base''', from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 658
|
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
lowercase : Any = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
lowercase : Optional[Any] = '''
Calculates how well the predicted code candidates solve their problems, using pass@k scores
Args:
    predictions: list of candidates to evaluate. Each candidate should be a list
        of strings with several code candidates to solve the problem.
    references: a list with a test for each prediction. Each test should evaluate the
        correctness of a code candidate.
    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
    timeout: maximum time in seconds allowed for a candidate program to run (Default: 3.0).
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
lowercase : int = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
lowercase : Optional[int] = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
    def _info( self ) -> Optional[Any]:
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , )
    def _compute( self , predictions , references , k=[1, 10, 100] , num_workers=4 , timeout=3.0 ) -> Optional[int]:
        if os.getenv("HF_ALLOW_CODE_EVAL" , 0 ) != "1":
            raise ValueError(_WARNING )
        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows." )
        with ThreadPoolExecutor(max_workers=num_workers ) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list )
            for task_id, (candidates, test_case) in enumerate(zip(predictions , references ) ):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness , *args )
                    futures.append(future )
                    completion_id[task_id] += 1
                    n_samples += 1
            for future in as_completed(futures ):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result) )
        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed ) )
            correct.append(sum(passed ) )
        total = np.array(total )
        correct = np.array(correct )
        ks = k
        pass_at_k = {f'''pass@{k}''': estimate_pass_at_k(total , correct , k ).mean() for k in ks if (total >= k).all()}
        return pass_at_k, results
def estimate_pass_at_k( num_samples , num_correct , k ):
    """Estimates pass@k of each problem and returns them in an array."""
    def estimator(n: int , c: int , k: int ) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )

    if isinstance(num_samples , int ):
        num_samples_it = itertools.repeat(num_samples , len(num_correct ) )
    else:
        assert len(num_samples ) == len(num_correct )
        num_samples_it = iter(num_samples )
    return np.array([estimator(int(n ) , int(c ) , k ) for n, c in zip(num_samples_it , num_correct )] )
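# Worked example of the estimator above (illustrative numbers only): with n = 5
# generated programs for a problem of which c = 2 pass the tests,
# pass@1 = 1 - C(3, 1) / C(5, 1) = 0.4:
#
#   >>> estimate_pass_at_k(num_samples=[5], num_correct=[2], k=1)
#   array([0.4])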
| 568
| 0
|
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
SCREAMING_SNAKE_CASE : Tuple = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' )
    parser.add_argument('--file_path' , type=str , default='data/dump.txt' , help='The path to the data.' )
    parser.add_argument('--tokenizer_type' , type=str , default='bert' , choices=['bert', 'roberta', 'gpt2'] )
    parser.add_argument('--tokenizer_name' , type=str , default='bert-base-uncased' , help='The tokenizer to use.' )
    parser.add_argument('--dump_file' , type=str , default='data/dump' , help='The dump file prefix.' )
    args = parser.parse_args()
    logger.info(F'''Loading Tokenizer ({args.tokenizer_name})''' )
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['cls_token']  # `[CLS]`
        sep = tokenizer.special_tokens_map['sep_token']  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['cls_token']  # `<s>`
        sep = tokenizer.special_tokens_map['sep_token']  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['bos_token']  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map['eos_token']  # `<|endoftext|>`
    logger.info(F'''Loading text from {args.file_path}''' )
    with open(args.file_path , 'r' , encoding='utf8' ) as fp:
        data = fp.readlines()
    logger.info('Start encoding' )
    logger.info(F'''{len(data )} examples to process.''' )
    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = F'''{bos} {text.strip()} {sep}'''
        token_ids = tokenizer.encode(text , add_special_tokens=False )
        rslt.append(token_ids )
        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(F'''{iter} examples processed. - {(end-start):.2f}s/{interval}expl''' )
            start = time.time()
    logger.info('Finished binarization' )
    logger.info(F'''{len(data )} examples processed.''' )
    dp_file = F'''{args.dump_file}.{args.tokenizer_name}.pickle'''
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(F'''Dump to {dp_file}''' )
    with open(dp_file , 'wb' ) as handle:
        pickle.dump(rslt_ , handle , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
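# A minimal sketch of reading the dump back (illustrative; the file name below
# assumes the default --dump_file and --tokenizer_name values used above):
#
#   import pickle
#   with open('data/dump.bert-base-uncased.pickle', 'rb') as f:
#       sequences = pickle.load(f)  # list of np.uint16 arrays of token ids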
| 717
|
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_12, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.0_2, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None, ) -> Optional[Any]:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs( self) -> Optional[int]:
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self) -> Optional[int]:
        """simple docstring"""
        return DebertaVaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, )
    def check_loss_output( self, result) -> int:
        """simple docstring"""
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) -> Union[str, Any]:
        """simple docstring"""
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) -> List[Any]:
        """simple docstring"""
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_deberta_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) -> List[str]:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) -> Optional[int]:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_deberta_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) -> Union[str, Any]:
        """simple docstring"""
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_deberta_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) -> Optional[int]:
        """simple docstring"""
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common( self) -> Optional[Any]:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class _lowerCamelCase( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
lowercase_ : Any = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
lowercase_ : Any = (
{
"""feature-extraction""": DebertaVaModel,
"""fill-mask""": DebertaVaForMaskedLM,
"""question-answering""": DebertaVaForQuestionAnswering,
"""text-classification""": DebertaVaForSequenceClassification,
"""token-classification""": DebertaVaForTokenClassification,
"""zero-shot""": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase_ : int = True
lowercase_ : str = False
lowercase_ : str = False
lowercase_ : str = False
lowercase_ : List[Any] = False
    def setUp( self) -> str:
        """simple docstring"""
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)
    def test_config( self) -> Dict:
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_deberta_model( self) -> Union[str, Any]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)
    def test_deberta_for_sequence_classification( self) -> Optional[Any]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)
    def test_deberta_for_masked_lm( self) -> int:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)
    def test_deberta_for_question_answering( self) -> List[str]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)
    def test_deberta_for_token_classification( self) -> Tuple:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)
    def test_deberta_for_multiple_choice( self) -> Optional[Any]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained( self) -> List[str]:
        """simple docstring"""
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCamelCase( unittest.TestCase ):
    @unittest.skip(reason='Model not available yet')
    def test_inference_masked_lm( self) -> Tuple:
        """simple docstring"""
        pass
    @slow
    def test_inference_no_head( self) -> Optional[int]:
        """simple docstring"""
        model = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge')
        input_ids = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4), F'''{output[:, 1:4, 1:4]}''')
| 354
| 0
|
"""simple docstring"""
def apply_table( inp , table ):
    '''simple docstring'''
    res = ''
    for i in table:
        res += inp[i - 1]
    return res
def left_shift( data ):
    '''simple docstring'''
    return data[1:] + data[0]
def xor( a , b ):
    '''simple docstring'''
    res = ''
    for i in range(len(a ) ):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res
def apply_sbox( s , data ):
    '''simple docstring'''
    row = int('0b' + data[0] + data[-1] , 2 )
    col = int('0b' + data[1:3] , 2 )
    return bin(s[row][col] )[2:]
def function( expansion , s0 , s1 , key , message ):
    '''simple docstring'''
    left = message[:4]
    right = message[4:]
    temp = apply_table(right , expansion )
    temp = xor(temp , key )
    l = apply_sbox(s0 , temp[:4] )  # noqa: E741
    r = apply_sbox(s1 , temp[4:] )
    l = '0' * (2 - len(l )) + l  # noqa: E741
    r = '0' * (2 - len(r )) + r
    temp = apply_table(l + r , p4_table )
    temp = xor(left , temp )
    return temp + right
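# Worked example of the permutation helper above (illustrative): applying the
# 4-bit table [2, 4, 3, 1] to "1010" picks bits 2, 4, 3, 1 in that order:
#
#   >>> apply_table('1010', [2, 4, 3, 1])
#   '0011'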
if __name__ == "__main__":
    key = input('''Enter 10 bit key: ''')
    message = input('''Enter 8 bit message: ''')
    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)
    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print('''Cipher text is:''', CT)
    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print('''Plain text after decrypting is:''', PT)
| 573
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class LxmertConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''lxmert'''
    attribute_map = {}
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_attention_heads=12 , num_qa_labels=9500 , num_object_labels=1600 , num_attr_labels=400 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , l_layers=9 , x_layers=5 , r_layers=5 , visual_feat_dim=2048 , visual_pos_dim=4 , visual_loss_normalizer=6.67 , task_matched=True , task_mask_lm=True , task_obj_predict=True , task_qa=True , visual_obj_loss=True , visual_attr_loss=True , visual_feat_loss=True , **kwargs , ) -> Optional[int]:
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
        super().__init__(**kwargs )
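# A minimal usage sketch of the config above (illustrative; the defaults mirror
# unc-nlp/lxmert-base-uncased):
#
#   config = LxmertConfig()
#   print(config.num_hidden_layers)
#   # {'vision': 5, 'cross_encoder': 5, 'language': 9}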
| 573
| 1
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'tokenizer_file': {
        'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'gpt-neox-20b': 20_48,
}
class GPTNeoXTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self : Dict , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def save_vocabulary( self : Tuple , save_directory : str , filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def _build_conversation_input_ids( self : List[str] , conversation : "Conversation" ):
        '''simple docstring'''
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
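# A minimal usage sketch (illustrative; loading "EleutherAI/gpt-neox-20b"
# requires network access to the Hugging Face Hub):
#
#   tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#   ids = tokenizer("Hello world").input_ids
#   print(tokenizer.decode(ids))  # "Hello world"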
| 358
|
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job (job ):
    job_info = {}
    start = job['''started_at''']
    end = job['''completed_at''']
    start_datetime = date_parser.parse(start )
    end_datetime = date_parser.parse(end )
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0 )
    job_info['''started_at'''] = start
    job_info['''completed_at'''] = end
    job_info['''duration'''] = duration_in_min
    return job_info
def get_job_time (workflow_run_id , token=None ):
    headers = None
    if token is not None:
        headers = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F"Bearer {token}"}
    url = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url , headers=headers ).json()
    job_time = {}
    try:
        job_time.update({job['''name''']: extract_time_from_single_job(job ) for job in result['''jobs''']} )
        pages_to_iterate_over = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F"&page={i + 2}" , headers=headers ).json()
            job_time.update({job['''name''']: extract_time_from_single_job(job ) for job in result['''jobs''']} )
        return job_time
    except Exception:
        print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
        return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    args = parser.parse_args()
    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f"""{k}: {v["duration"]}""")
| 358
| 1
|
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__( self : Dict , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=50 , initializer_range=0.0_2 , use_labels=True , scope=None , ) -> Dict:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
    def prepare_config_and_inputs( self : str ) -> Tuple:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.get_config()
return config, input_ids, input_mask, token_labels
    def get_config( self : List[Any] ) -> Optional[int]:
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder( self : int ) -> Dict:
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model( self : List[Any] , config , input_ids , input_mask , token_labels , **kwargs , ) -> Optional[Any]:
        model = BertGenerationEncoder(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self : Union[str, Any] , config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , **kwargs , ) -> List[Any]:
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_decoder_model_past_large_inputs( self : List[Any] , config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , **kwargs , ) -> Union[str, Any]:
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config ).to(torch_device ).eval()
        # first forward pass
        outputs = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(
            next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )['hidden_states'][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3 ) )
    def create_and_check_for_causal_lm( self : int , config , input_ids , input_mask , token_labels , *args , ) -> Any:
        model = BertGenerationDecoder(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common( self : str ) -> Union[str, Any]:
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class _lowercase ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Optional[int] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE : Dict = (BertGenerationDecoder,) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE : str = (
{"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
if is_torch_available()
else {}
)
    def setUp( self : int ) -> Union[str, Any]:
        self.model_tester = BertGenerationEncoderTester(self )
        self.config_tester = ConfigTester(self , config_class=BertGenerationConfig , hidden_size=37 )
    def test_config( self : Any ) -> Optional[int]:
        self.config_tester.run_common_tests()
    def test_model( self : List[Any] ) -> int:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_as_bert( self : Dict ) -> Optional[Any]:
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = 'bert'
        self.model_tester.create_and_check_model(config , input_ids , input_mask , token_labels )
    def test_model_as_decoder( self : Union[str, Any] ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )
    def test_decoder_model_past_with_large_inputs( self : Dict ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs )
    def test_model_as_decoder_with_default_input_mask( self : int ) -> Union[str, Any]:
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , input_mask , token_labels , encoder_hidden_states , encoder_attention_mask , )
    def test_for_causal_lm( self : Optional[int] ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self : Optional[Any] ) -> Any:
        model = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
        self.assertIsNotNone(model )
@require_torch
class _lowercase ( unittest.TestCase ):
    @slow
    def test_inference_no_head_absolute_embedding( self : Optional[int] ) -> Optional[Any]:
        model = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size([1, 8, 1024] )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
@require_torch
class _lowercase ( unittest.TestCase ):
    @slow
    def test_inference_no_head_absolute_embedding( self : List[Any] ) -> Any:
        model = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size([1, 8, 5_0358] )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
| 56
|
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url (repo_id : str , path : str , revision : Optional[str] = None ) -> str:
    """simple docstring"""
    if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release:
        # old versions of hfh don't url-encode the file path
        path = quote(path )
    return hfh.hf_hub_url(repo_id , path , repo_type='dataset' , revision=revision )
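# A minimal usage sketch (illustrative repo id and file name):
#
#   >>> hf_hub_url("user/my-dataset", "data/train.csv", revision="main")
#   'https://huggingface.co/datasets/user/my-dataset/resolve/main/data/train.csv'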
| 56
| 1
|
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester( ConfigTester ):
    '''simple docstring'''
    def create_and_test_config_common_properties( self : List[str] ) -> Optional[int]:
        """simple docstring"""
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , """tf_padding""" ) )
        self.parent.assertTrue(hasattr(config , """depth_multiplier""" ) )
class MobileNetVaModelTester:
    '''simple docstring'''
    def __init__( self : Union[str, Any] , parent , batch_size=13 , num_channels=3 , image_size=32 , depth_multiplier=0.25 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=32 , first_layer_is_expansion=True , finegrained_output=True , tf_padding=True , hidden_act="relu6" , last_hidden_size=1280 , classifier_dropout_prob=0.1 , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=10 , scope=None , ) -> Any:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs( self : Any ) -> Optional[Any]:
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config( self : str ) -> Union[str, Any]:
        """simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model( self : Tuple , config , pixel_values , labels , pixel_labels ) -> List[Any]:
        """simple docstring"""
        model = MobileNetVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        self.parent.assertEqual(
            result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
    def create_and_check_for_image_classification( self : List[str] , config , pixel_values , labels , pixel_labels ) -> Tuple:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_semantic_segmentation( self : int , config , pixel_values , labels , pixel_labels ) -> Optional[int]:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        result = model(pixel_values , labels=pixel_labels )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    def prepare_config_and_inputs_for_common( self : Tuple ) -> Optional[int]:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class lowerCAmelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
_A : Tuple = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
_A : Optional[Any] = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_A : Tuple = False
_A : List[str] = False
_A : List[str] = False
_A : Optional[int] = False
    def setUp( self : Optional[Any] ) -> List[Any]:
        """simple docstring"""
        self.model_tester = MobileNetVaModelTester(self )
        self.config_tester = MobileNetVaConfigTester(self , config_class=MobileNetVaConfig , has_text_modality=False )
    def test_config( self : Union[str, Any] ) -> List[Any]:
        """simple docstring"""
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""MobileNetV2 does not use inputs_embeds""" )
    def test_inputs_embeds( self : List[str] ) -> Union[str, Any]:
        """simple docstring"""
        pass
    @unittest.skip(reason="""MobileNetV2 does not support input and output embeddings""" )
    def test_model_common_attributes( self : Any ) -> Tuple:
        """simple docstring"""
        pass
    @unittest.skip(reason="""MobileNetV2 does not output attentions""" )
    def test_attention_outputs( self : List[str] ) -> int:
        """simple docstring"""
        pass
    def test_forward_signature( self : List[str] ) -> Dict:
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self : Dict ) -> Any:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self : List[str] ) -> Tuple:
        """simple docstring"""
        def check_hidden_states_output(inputs_dict : List[Any] , config : Tuple , model_class : List[str] ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 16
            self.assertEqual(len(hidden_states ) , expected_num_stages )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self : Union[str, Any] ) -> Any:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_for_semantic_segmentation( self : List[str] ) -> int:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self : Optional[int] ) -> Optional[Any]:
        """simple docstring"""
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    @cached_property
    def default_image_processor( self : Any ) -> Union[str, Any]:
        """simple docstring"""
        return (
            MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v2_1.0_224""" ) if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head( self : str ) -> int:
        """simple docstring"""
        model = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v2_1.0_224""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1001) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
    @slow
    def test_inference_semantic_segmentation( self : Tuple ) -> Any:
        """simple docstring"""
        model = MobileNetVaForSemanticSegmentation.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
        model = model.to(torch_device )
        image_processor = MobileNetVaImageProcessor.from_pretrained("""google/deeplabv3_mobilenet_v2_1.0_513""" )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1E-4 ) )
| 649
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler( SchedulerMixin , ConfigMixin ):
    '''simple docstring'''
    order = 1
    @register_to_config
    def __init__( self : Optional[int] , num_train_timesteps=2000 , beta_min=0.1 , beta_max=20 , sampling_eps=1E-3 ) -> int:
        """simple docstring"""
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None
    def set_timesteps( self : List[Any] , num_inference_steps , device : Union[str, torch.device] = None ) -> str:
        """simple docstring"""
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )
    def step_pred( self : Tuple , score , x , t , generator=None ) -> str:
        """simple docstring"""
        if self.timesteps is None:
            raise ValueError(
                """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps )
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean
    def __len__( self : Tuple ) -> Optional[int]:
        """simple docstring"""
        return self.config.num_train_timesteps
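# A minimal sampling-loop sketch (illustrative only; `model` and the initial
# noisy sample `x` are assumed to come from elsewhere):
#
#   scheduler = ScoreSdeVpScheduler()
#   scheduler.set_timesteps(num_inference_steps=1000)
#   for t in scheduler.timesteps:
#       score = model(x, t)
#       x, x_mean = scheduler.step_pred(score, x, t)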
| 649
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : str = logging.get_logger(__name__)
class _A ( PretrainedConfig ):
'''simple docstring'''
__lowerCamelCase : Tuple = "encoder-decoder"
__lowerCamelCase : Union[str, Any] = True
    def __init__( self ,**kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("""encoder""" )
        encoder_model_type = encoder_config.pop("""model_type""" )
        decoder_config = kwargs.pop("""decoder""" )
        decoder_model_type = decoder_config.pop("""model_type""" )
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type ,**encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type ,**decoder_config )
        self.is_encoder_decoder = True
@classmethod
def snake_case_ ( cls ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
logger.info("""Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
snake_case : Union[str, Any] = True
snake_case : Optional[Any] = True
return cls(encoder=encoder_config.to_dict() ,decoder=decoder_config.to_dict() ,**SCREAMING_SNAKE_CASE_ )
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Optional[int] = copy.deepcopy(self.__dict__ )
snake_case : Tuple = self.encoder.to_dict()
snake_case : Dict = self.decoder.to_dict()
snake_case : Optional[Any] = self.__class__.model_type
return output
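# --- Usage sketch (added for illustration; not part of the original file): composing a
# config from two sub-configs; the classmethod flips the decoder into cross-attention mode.
#
#     from transformers import BertConfig
#     config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
#     assert config.decoder.is_decoder and config.decoder.add_cross_attention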
| 36
|
'''simple docstring'''
def solution(n: int = 4000000) -> int:
    """Returns the sum of all even Fibonacci numbers that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
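# --- Alternative sketch (added for illustration; the function name is ours): even
# Fibonacci numbers satisfy E(k) = 4 * E(k-1) + E(k-2), so odd terms never need to be
# generated at all.
def solution_even_recurrence(n: int = 4000000) -> int:
    total = 0
    prev, curr = 2, 8  # the first two even Fibonacci numbers
    while prev <= n:
        total += prev
        prev, curr = curr, 4 * curr + prev
    return total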
if __name__ == "__main__":
print(F"{solution() = }")
| 694
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_transfo_xl'] = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_transfo_xl'] = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
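# --- Sketch (added for illustration; standalone, hypothetical names): the essence of the
# _LazyModule pattern above is a module subclass whose __getattr__ imports on first use:
#
#     import importlib, types
#
#     class TinyLazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure
#         def __getattr__(self, attr):
#             for module_name, names in self._import_structure.items():
#                 if attr in names:
#                     return getattr(importlib.import_module(module_name), attr)
#             raise AttributeError(attr)
#
#     lazy = TinyLazyModule("demo", {"json": ["dumps"]})
#     lazy.dumps({"ok": True})  # "json" is only imported at this point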
| 697
|
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400,
                 do_resize=True, size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True,
                 image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_pad=True):
        # Default `size` mirrors the image processor under test.
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Computes the expected (height, width) after resizing with the shortest-edge rule."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
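# --- Reference sketch (added for illustration; the helper name is ours): the
# shortest-edge resize rule that get_expected_values mirrors, as a pure function
# (ignoring the longest-edge cap for simplicity).
def _shortest_edge_resize(width, height, shortest_edge=18):
    # Returns (expected_height, expected_width): the shorter side is scaled to
    # `shortest_edge` and the longer side keeps the aspect ratio.
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge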
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
__a : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a : Optional[int] = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
__a : Any = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
__a : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
__a , __a : str = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
__a : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a : List[str] = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
__a , __a : Any = self.image_processor_tester.get_expected_values(__UpperCamelCase , batched=__UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__a : Dict = json.loads(f.read() )
__a : Optional[int] = {"""image_id""": 3_9769, """annotations""": target}
# encode them
__a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
__a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
__a : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase )
__a : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
__a : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) )
# verify boxes
__a : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase )
__a : Any = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
__a : Union[str, Any] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) )
# verify is_crowd
__a : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) )
# verify class_labels
__a : Any = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) )
# verify orig_size
__a : Any = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) )
# verify size
__a : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
__a : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__a : Tuple = json.loads(f.read() )
__a : str = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
__a : int = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__a : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
__a : Tuple = image_processing(images=__UpperCamelCase , annotations=__UpperCamelCase , masks_path=__UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
__a : List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , __UpperCamelCase )
__a : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __UpperCamelCase , atol=1E-4 ) )
# verify area
__a : Optional[Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __UpperCamelCase ) )
# verify boxes
__a : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __UpperCamelCase )
__a : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __UpperCamelCase , atol=1E-3 ) )
# verify image_id
__a : List[str] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __UpperCamelCase ) )
# verify is_crowd
__a : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __UpperCamelCase ) )
# verify class_labels
__a : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __UpperCamelCase ) )
# verify masks
__a : Union[str, Any] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __UpperCamelCase )
# verify orig_size
__a : str = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __UpperCamelCase ) )
# verify size
__a : List[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __UpperCamelCase ) )
| 697
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 5_12,
'squeezebert/squeezebert-mnli': 5_12,
'squeezebert/squeezebert-mnli-headless': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast SqueezeBERT tokenizer backed by the HuggingFace tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )
        # Keep the backend normalizer in sync with the requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
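# --- Illustration (added; not part of the original file; token ids are hypothetical):
# the sequence-pair layout the two methods above produce.
def _demo_pair_layout(cls_id=101, sep_id=102):
    first, second = [7, 8], [9]
    input_ids = [cls_id] + first + [sep_id] + second + [sep_id]
    token_type_ids = [0] * (len(first) + 2) + [1] * (len(second) + 1)
    return input_ids, token_type_ids  # ([101, 7, 8, 102, 9, 102], [0, 0, 0, 0, 1, 1])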
| 50
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutxlm'''] = ['''LayoutXLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutxlm_fast'''] = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4
| 0
|
"""Convert a T5 original TensorFlow checkpoint to a PyTorch model."""

import argparse

from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
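# --- Example invocation (added for illustration; the script file name and all paths are
# placeholders, and the flags are the ones defined by the argparse setup above):
#
#     python convert_t5_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/t5/model.ckpt \
#         --config_file /path/to/config.json \
#         --pytorch_dump_path /path/to/pytorch_model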
| 201
|
'''simple docstring'''
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    """Write to stdout immediately, bypassing buffering."""
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    """Write `content` wrapped in the given ANSI color code."""
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    """Return the cursor to the start of the current line."""
    forceWrite("\r")


def move_cursor(num_lines, direction):
    """Move the cursor `num_lines` in the given direction ('UP'/'DOWN'/'LEFT'/'RIGHT')."""
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    """Blank out the current line."""
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    """Draw a horizontal rule across the terminal."""
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
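if __name__ == "__main__":
    # Demo (added for illustration; not part of the original module): draw a rule,
    # then print a message in green (ANSI code 32) using the helpers above.
    linebreak()
    forceWrite("\n")
    writeColor("hello from the cursor helpers", 32)
    forceWrite("\n")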
| 201
| 1
|
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
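# --- Shape sketch (added for illustration; toy sizes): for checkpoint_version >= 2.0 a
# fused QKV weight of shape [num_splits * num_heads * head_dim, hidden] is reordered but
# keeps its overall shape:
#
#     qkv = torch.randn(3 * 4 * 8, 32)
#     out = fix_query_key_value_ordering(qkv, 2.0, 3, 4, 8)  # num_splits=3, heads=4, dim=8
#     assert out.shape == qkv.shape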
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)
        # Stop if that's not a layer
        if m is None:
            break
        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)
        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val
        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask
            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val
        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val
        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)
        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers wants the matrix tied to the word embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint", type=str, help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257, n_positions=1024, n_embd=1024, n_layer=24, n_head=16, n_inner=4096,
            activation_function=activation_function, resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1,
            layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True,
            summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1,
            scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 21
|
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = """▁"""
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing.
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = """<s>"""
UpperCAmelCase__ : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ) , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """[MASK]""" )
self.assertEqual(len(_lowerCamelCase ) , 1004 )
def _a (self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _a (self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
UpperCAmelCase__ : List[Any] = self.get_tokenizer()
UpperCAmelCase__ : Optional[Any] = self.get_rust_tokenizer()
UpperCAmelCase__ : Dict = """I was born in 92000, and this is falsé."""
UpperCAmelCase__ : Dict = tokenizer.tokenize(_lowerCamelCase )
UpperCAmelCase__ : int = rust_tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : int = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
UpperCAmelCase__ : str = rust_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Any = self.get_rust_tokenizer()
UpperCAmelCase__ : List[Any] = tokenizer.encode(_lowerCamelCase )
UpperCAmelCase__ : Union[str, Any] = rust_tokenizer.encode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)
UpperCAmelCase__ : Union[str, Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_lowerCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [285, 46, 10, 170, 382] , )
UpperCAmelCase__ : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_lowerCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
UpperCAmelCase__ : Tuple = tokenizer.convert_tokens_to_ids(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCAmelCase__ : Tuple = tokenizer.convert_ids_to_tokens(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""")
@slow
def _a (self ):
"""simple docstring"""
        txt = """Hello World!"""
        ids = [65, 18536, 2260, 101, 66]
        self.assertListEqual(ids, self.big_tokenizer.encode(txt))
@slow
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
# fmt: off
UpperCAmelCase__ : Dict = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(_lowerCamelCase , self.big_tokenizer.encode(_lowerCamelCase ) )
@require_torch
@slow
def _a (self ):
"""simple docstring"""
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
UpperCAmelCase__ : int = list(self.big_tokenizer.get_vocab().keys() )[:10]
UpperCAmelCase__ : str = """ """.join(_lowerCamelCase )
UpperCAmelCase__ : int = self.big_tokenizer.encode_plus(_lowerCamelCase , return_tensors="""pt""" , return_token_type_ids=_lowerCamelCase )
UpperCAmelCase__ : str = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=_lowerCamelCase )
UpperCAmelCase__ : Optional[int] = BigBirdConfig(attention_type="""original_full""" )
UpperCAmelCase__ : Optional[int] = BigBirdModel(_lowerCamelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_lowerCamelCase )
model(**_lowerCamelCase )
@slow
def _a (self ):
"""simple docstring"""
        tokenizer = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""")
        decoded_text = tokenizer.decode(tokenizer("""Paris is the [MASK].""").input_ids)
        self.assertTrue(decoded_text == """[CLS] Paris is the[MASK].[SEP]""")
@slow
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = {"""input_ids""": [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCamelCase , model_name="""google/bigbird-roberta-base""" , revision="""215c99f1600e06f83acce68422f2035b2b5c3510""" , )
| 182
| 0
|
"""simple docstring"""
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """
    returns the list of all possible ways `target` can be constructed by
    concatenating words from `word_bank`
    """
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
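# --- Companion sketch (added for illustration; the helper name is ours): the same DP
# table as all_construct, but only counting combinations instead of materializing them,
# which avoids the potentially exponential output size.
def count_construct(target: str, word_bank: list[str] | None = None) -> int:
    word_bank = word_bank or []
    counts = [0] * (len(target) + 1)
    counts[0] = 1  # the empty string has exactly one construction
    for i in range(len(target) + 1):
        if counts[i]:
            for word in word_bank:
                if target[i : i + len(word)] == word:
                    counts[i + len(word)] += counts[i]
    return counts[len(target)]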
if __name__ == "__main__":
print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""]))
print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""]))
print(
all_construct(
"""hexagonosaurus""",
["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""],
)
)
| 349
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class CamembertConfig(PretrainedConfig):
    """Configuration class for CamemBERT (RoBERTa-architecture) models."""

    model_type = "camembert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2,
                 position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
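# --- Usage sketch (added for illustration; not part of the original file; the local
# save path is a placeholder):
#
#     config = CamembertConfig(num_hidden_layers=6, hidden_size=512, num_attention_heads=8)
#     config.save_pretrained("./camembert-small")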
| 349
| 1
|
'''simple docstring'''
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    """
    Counts tile totals t <= t_limit that can be formed by between 1 and n_limit
    distinct square laminae (outer square of width o with a centred square hole
    of width h uses t = o*o - h*h tiles).
    """
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
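# --- Cross-check sketch (added for illustration; the helper name is ours): enumerate
# (outer_width, hole_width) pairs directly instead of bounding hole_width, to verify
# solution() on small limits.
def _brute_force_solution(t_limit: int = 100, n_limit: int = 10) -> int:
    from collections import Counter

    tiles: Counter = Counter()
    for outer_width in range(3, t_limit + 1):
        for hole_width in range(outer_width - 2, 0, -2):
            t = outer_width * outer_width - hole_width * hole_width
            if t > t_limit:
                break
            tiles[t] += 1
    return sum(1 for n in tiles.values() if 1 <= n <= n_limit)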
if __name__ == "__main__":
print(f'''{solution() = }''')
| 13
|
'''simple docstring'''
import sys
from collections import defaultdict
class Heap:
    """Min-heap that also tracks each vertex's position inside the heap array."""

    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the element at `start` down until the heap property holds."""
        if start > size // 2 - 1:
            return
        if 2 * start + 2 >= size:
            smallest_child = 2 * start + 1
        elif heap[2 * start + 1] < heap[2 * start + 2]:
            smallest_child = 2 * start + 1
        else:
            smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, temp_position = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp_position
            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp)
            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Sift a decreased key at `index` up towards the root."""
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
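# --- Reference sketch (added for illustration; independent of the class above): the same
# minimum spanning tree computed with the stdlib heap, over the identical
# adjacency-list format (vertex -> list of [neighbor, distance] pairs).
import heapq


def prim_mst_edges(adjacency_list):
    visited = {0}
    heap = [(weight, 0, vertex) for vertex, weight in adjacency_list[0]]
    heapq.heapify(heap)
    tree_edges = []
    while heap and len(visited) < len(adjacency_list):
        weight, u, v = heapq.heappop(heap)
        if v in visited:
            continue
        visited.add(v)
        tree_edges.append((u, v))
        for neighbor, distance in adjacency_list[v]:
            if neighbor not in visited:
                heapq.heappush(heap, (distance, v, neighbor))
    return tree_edges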
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("""Enter number of edges: """).strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 13
| 1
|
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r'^(?P<major>\d+)' r'\.(?P<minor>\d+)' r'\.(?P<patch>\d+)$')


@total_ordering
@dataclass
class Version:
    """Dataset version identifier MAJOR.MINOR.PATCH."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'''

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f'''{other} (type {type(other)}) cannot be compared to version.''')

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self):
        return self.version_str


def _str_to_version_tuple(version_str):
    """Returns the (major, minor, patch) tuple extracted from `version_str`."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f'''Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.''')
    return tuple(int(v) for v in [res.group("""major"""), res.group("""minor"""), res.group("""patch""")])


def _version_tuple_to_str(version_tuple):
    """Joins a version tuple back into a dotted string."""
    return ".".join(str(v) for v in version_tuple)
| 339
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_poolformer'] = ['PoolFormerFeatureExtractor']
    _import_structure['image_processing_poolformer'] = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
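# Illustrative note (not part of this module): _LazyModule defers the heavy
# torch/vision imports above until an attribute is first accessed.  A minimal
# standalone version of the same idea uses PEP 562's module-level __getattr__;
# the mapping below is a hypothetical example, not transformers' actual code:
#
#     import importlib
#
#     _LAZY_ATTRS = {"sqrt": "math"}  # attribute -> module that provides it
#
#     def __getattr__(name):  # only called for attributes not found normally
#         if name in _LAZY_ATTRS:
#             return getattr(importlib.import_module(_LAZY_ATTRS[name]), name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")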
| 339
| 1
|
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    """Reader that turns plain-text files (one example per line) into a dataset."""

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
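# Usage sketch (illustrative, not part of the original module; exact return
# types depend on the installed `datasets` version):
if __name__ == "__main__":
    import os
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
        f.write("hello\nworld\n")
    ds = TextDatasetReader(f.name, keep_in_memory=True).read()
    print(ds[0])  # expected: {'text': 'hello'}; each input line is one example
    os.unlink(f.name)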
| 15
|
'''simple docstring'''
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    """Count perimeters <= limit that belong to exactly one integer-sided
    right triangle.

    Primitive triples come from Euclid's formula with m > n > 0, coprime and
    of opposite parity: (m^2 - n^2, 2mn, m^2 + n^2), perimeter 2m(m + n).
    """
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            # Every multiple of a primitive perimeter is a valid perimeter too.
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
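def _euclid_triple(m: int, n: int) -> tuple:
    """Illustrative helper (not in the original): the primitive triple for (m, n)."""
    return (m * m - n * n, 2 * m * n, m * m + n * n)


# Worked example: (m, n) = (2, 1) yields the classic (3, 4, 5) triangle,
# whose perimeter 12 equals 2 * m * (m + n), the quantity used in solution().
assert _euclid_triple(2, 1) == (3, 4, 5)
assert sum(_euclid_triple(2, 1)) == 2 * 2 * (2 + 1)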
if __name__ == "__main__":
print(f'''{solution() = }''')
| 13
| 0
|
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Wraps tokenized sequences (plus their lengths) for distillation training."""

    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])
        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Sanity-check that token arrays and lengths stay in sync."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Split sequences longer than max_model_input_size into valid chunks."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    # Re-add the boundary tokens that chunking stripped.
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)
                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])
        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Drop sequences of 11 tokens or fewer."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Drop sequences in which more than 50% of the tokens are unknown."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Log basic corpus statistics (master process only)."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Pad a list of (token_ids, length) pairs into dense (bs, max_len) tensors."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
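# Usage sketch (illustrative, not part of the original distillation script;
# it relies on the `utils` logger import above): batch_sequences doubles as a
# DataLoader collate_fn.  `params` is a hypothetical stand-in for the real
# config object, and both toy sequences must exceed 11 tokens to survive
# remove_empty_sequences().
if __name__ == "__main__":
    from types import SimpleNamespace

    from torch.utils.data import DataLoader

    params = SimpleNamespace(
        max_model_input_size=128,
        mlm=True,
        is_master=False,  # keeps print_statistics() quiet in this sketch
        special_tok_ids={"cls_token": 0, "sep_token": 1, "pad_token": 2, "unk_token": 3},
    )
    data = [
        [0, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1],
        [0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 1],
    ]
    dataset = LmSeqsDataset(params, data)
    loader = DataLoader(dataset, batch_size=2, collate_fn=dataset.batch_sequences)
    token_ids, lengths = next(iter(loader))
    print(token_ids.shape, lengths)  # expected: torch.Size([2, 13]) tensor([13, 13])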
| 709
|
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class A ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = ReformerTokenizer
lowerCamelCase = ReformerTokenizerFast
lowerCamelCase = True
lowerCamelCase = False
lowerCamelCase = True
def snake_case__ ( self : Any )-> Union[str, Any]:
'''simple docstring'''
super().setUp()
A__ = ReformerTokenizer(lowercase_,keep_accents=lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self : int )-> int:
'''simple docstring'''
A__ = '<s>'
A__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ),lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ),lowercase_ )
def snake_case__ ( self : List[str] )-> Dict:
'''simple docstring'''
A__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0],'<unk>' )
self.assertEqual(vocab_keys[1],'<s>' )
self.assertEqual(vocab_keys[-1],'j' )
self.assertEqual(len(lowercase_ ),1_0_0_0 )
def snake_case__ ( self : Any )-> int:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size,1_0_0_0 )
def snake_case__ ( self : Any )-> Tuple:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
A__ = self.get_tokenizer()
A__ = self.get_rust_tokenizer()
A__ = 'I was born in 92000, and this is falsé.'
A__ = tokenizer.tokenize(lowercase_ )
A__ = rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_,lowercase_ )
A__ = tokenizer.encode(lowercase_,add_special_tokens=lowercase_ )
A__ = rust_tokenizer.encode(lowercase_,add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_,lowercase_ )
A__ = self.get_rust_tokenizer()
A__ = tokenizer.encode(lowercase_ )
A__ = rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_,lowercase_ )
def snake_case__ ( self : str,lowercase_ : int=1_5 )-> Union[str, Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A__ = self.rust_tokenizer_class.from_pretrained(lowercase_,**lowercase_ )
# Simple input
A__ = 'This is a simple input'
A__ = ['This is a simple input 1', 'This is a simple input 2']
A__ = ('This is a simple input', 'This is a pair')
A__ = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(lowercase_,tokenizer_r.encode,lowercase_,max_length=lowercase_,padding='max_length' )
# Simple input
self.assertRaises(lowercase_,tokenizer_r.encode_plus,lowercase_,max_length=lowercase_,padding='max_length' )
# Simple input
self.assertRaises(
lowercase_,tokenizer_r.batch_encode_plus,lowercase_,max_length=lowercase_,padding='max_length',)
# Pair input
self.assertRaises(lowercase_,tokenizer_r.encode,lowercase_,max_length=lowercase_,padding='max_length' )
# Pair input
self.assertRaises(lowercase_,tokenizer_r.encode_plus,lowercase_,max_length=lowercase_,padding='max_length' )
# Pair input
self.assertRaises(
lowercase_,tokenizer_r.batch_encode_plus,lowercase_,max_length=lowercase_,padding='max_length',)
def snake_case__ ( self : Any )-> Optional[Any]:
'''simple docstring'''
pass
def snake_case__ ( self : Dict )-> Dict:
'''simple docstring'''
A__ = ReformerTokenizer(lowercase_,keep_accents=lowercase_ )
A__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowercase_,['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ),[2_8_5, 4_6, 1_0, 1_7_0, 3_8_2],)
A__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase_,[
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
],)
A__ = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_,[8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4],)
A__ = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_,[
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
],)
@cached_property
def snake_case__ ( self : str )-> Optional[int]:
'''simple docstring'''
return ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment' )
@slow
def snake_case__ ( self : Optional[Any] )-> int:
'''simple docstring'''
A__ = 'Hello World!'
A__ = [1_2_6, 3_2, 2_6_2, 1_5_2, 3_8, 7_2, 2_8_7]
self.assertListEqual(lowercase_,self.big_tokenizer.encode(lowercase_ ) )
@slow
def snake_case__ ( self : Dict )-> Dict:
'''simple docstring'''
A__ = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
A__ = [
1_0_8,
2_6_5,
2_4,
1_1_1,
4,
2_5_8,
1_5_6,
3_5,
2_8,
2_7_5,
3,
2_5_9,
2_9_7,
2_6_0,
8_4,
4,
3_5,
1_1_0,
4_4,
8,
2_5_9,
9_1,
2_6_8,
2_1,
1_1,
2_0_9,
2_7_4,
1_0_9,
2_6_6,
2_7_7,
1_1_7,
8_6,
9_3,
3_1_5,
2_5_8,
2_7_8,
2_5_8,
2_7_7,
2_5_8,
0,
2_5_8,
2_8_8,
2_5_8,
3_1_9,
2_5_8,
0,
2_5_8,
0,
2_5_8,
0,
2_5_8,
0,
2_5_8,
2_8_7,
2_5_8,
3_1_5,
2_5_8,
2_8_9,
2_5_8,
2_7_8,
9_9,
2_6_9,
2_6_6,
2_6_2,
8,
2_5_9,
2_4_1,
4,
2_1_7,
2_3_0,
2_6_8,
2_6_6,
5_5,
1_6_8,
1_0_6,
7_5,
1_9_3,
2_6_6,
2_2_3,
2_7,
4_9,
2_6,
2_8_2,
2_5,
2_6_4,
2_9_9,
1_9,
2_6,
0,
2_5_8,
2_7_7,
1_1_7,
8_6,
9_3,
1_7_6,
1_8_3,
2_7_0,
1_1,
2_6_2,
4_2,
6_1,
2_6_5,
]
self.assertListEqual(lowercase_,self.big_tokenizer.encode(lowercase_ ) )
@require_torch
@slow
def snake_case__ ( self : Union[str, Any] )-> int:
'''simple docstring'''
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
A__ = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
A__ = ' '.join(lowercase_ )
A__ = self.big_tokenizer.encode_plus(lowercase_,return_tensors='pt' )
A__ = self.big_tokenizer.batch_encode_plus([sequence, sequence],return_tensors='pt' )
A__ = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
A__ = encoded_sequence['input_ids'].shape
A__ = ReformerModel(lowercase_ )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowercase_ )
model(**lowercase_ )
@slow
def snake_case__ ( self : Tuple )-> Dict:
'''simple docstring'''
A__ = {'input_ids': [[1_0_8, 2_6_5, 2_4, 1_1_1, 4, 2_5_8, 1_5_6, 7, 5_1, 2_7_9, 5_8, 7, 7_6, 2_5, 6_9, 2_7_8], [1_4_0, 2_4_3, 2_6_4, 1_3_4, 1_7, 2_6_7, 7_7, 2_6_3, 2_2, 2_6_2, 2_9_7, 2_5_8, 3_0_4, 1_7_7, 2_7_9, 2_6_6, 1_4, 8_9, 1_3, 3_5, 2_6_1, 2_9_9, 2_7_2, 1_3_7, 2_7_5, 2_7_8]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
A__ = [
'This is a very simple sentence.',
'The quick brown fox jumps over the lazy dog.',
]
self.tokenizer_integration_test_util(
expected_encoding=lowercase_,model_name='google/reformer-crime-and-punishment',revision='0e6c3decb8211d49bf881013425dc8b0448b3f5a',padding=lowercase_,sequences=lowercase_,)
| 586
| 0
|
def temp_input_value(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return ``min_val`` or ``max_val`` depending on ``option`` (a default picker)."""
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    """Integer midpoint of two numbers."""
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Find ``to_guess`` strictly inside (lower, higher) by binary search,
    printing every intermediate guess."""
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")
    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    """Prompt for bounds and a target, then run the search."""
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
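def count_probes(lower: int, higher: int, to_guess: int) -> int:
    """Illustrative helper (not in the original): how many midpoint probes the
    binary search above needs, i.e. at most about log2(higher - lower)."""
    probes = 0
    while True:
        probes += 1
        number = get_avg(lower, higher)
        if number < to_guess:
            lower = number
        elif number > to_guess:
            higher = number
        else:
            return probes


assert count_probes(1, 1000, 500) == 1  # the first midpoint is an exact hit
assert count_probes(1, 1000, 17) <= 10  # each probe halves the interval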
| 121
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 121
| 1
|
'''simple docstring'''
def solution(limit: int = 1_000_000) -> int:
    """Count n < limit for which x**2 - y**2 - z**2 = n has exactly ten
    solutions in positive integers x > y > z in arithmetic progression.

    With x = y + d and z = y - d, n = y * (4d - y); so for each first term y
    we scan its multiples n and recover d = (y + n / y) / 4.
    """
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # z > 0 needs y > d; n > 0 needs 4d > y
                    frequency[n] += 1
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(F"{solution() = }")
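def _count_solutions_brute(n: int, search_limit: int = 200) -> int:
    """Illustrative cross-check (not in the original): count progressions
    x = y + d, z = y - d with x**2 - y**2 - z**2 == n by direct search."""
    count = 0
    for y in range(1, search_limit):
        for d in range(1, y):  # z = y - d must stay positive
            if (y + d) ** 2 - y**2 - (y - d) ** 2 == n:
                count += 1
    return count


# n = 27 has exactly the two solutions 34^2 - 27^2 - 20^2 and 12^2 - 9^2 - 6^2,
# matching frequency[27] == 2 in the sieve above.
assert _count_solutions_brute(27) == 2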
| 566
|
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args) -> dict:
    """Turn ``--key value`` pairs that argparse did not recognize into a dict."""
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main() -> None:
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
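# Illustrative check (not in the original): unparsed "--key value" extras are
# forwarded to the selected command as keyword arguments, e.g.
#   $ datasets-cli test ./my_dataset --name squad
assert parse_unknown_args(["--name", "squad", "--cache_dir", "/tmp"]) == {
    "name": "squad",
    "cache_dir": "/tmp",
}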
| 566
| 1
|
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """Tests for the VQ-VAE model wrapper."""

    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()
        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)
        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
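# Illustrative sketch (not part of the test suite): the same tiny VQModel can
# be built directly from the init dict used above and run on random data.
# The kwargs mirror prepare_init_args_and_inputs_for_common(); treat this as a
# sketch, not canonical diffusers usage.
if __name__ == "__main__":
    torch.manual_seed(0)
    tiny = VQModel(
        block_out_channels=[32, 64],
        in_channels=3,
        out_channels=3,
        down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
        up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
        latent_channels=3,
    )
    sample = torch.randn(1, 3, 32, 32)
    with torch.no_grad():
        reconstruction = tiny(sample).sample
    print(reconstruction.shape)  # expected: torch.Size([1, 3, 32, 32])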
| 149
|
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    """Builds the kwargs dict used to instantiate ImageGPTImageProcessor in tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict(self):
"""simple docstring"""
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
[-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
__lowercase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__a , """clusters""" ) )
self.assertTrue(hasattr(__a , """do_resize""" ) )
self.assertTrue(hasattr(__a , """size""" ) )
self.assertTrue(hasattr(__a , """do_normalize""" ) )
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
__lowercase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
__lowercase : Any = self.image_processing_class(**self.image_processor_dict )
__lowercase : List[Any] = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(__a , obj[key] ) )
else:
self.assertEqual(obj[key] , __a )
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase : Dict = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase : str = os.path.join(__a , """image_processor.json""" )
image_processor_first.to_json_file(__a )
__lowercase : Optional[Any] = self.image_processing_class.from_json_file(__a ).to_dict()
__lowercase : Any = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(__a , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , __a )
def lowerCAmelCase ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(__a )
__lowercase : List[Any] = self.image_processing_class.from_pretrained(__a ).to_dict()
__lowercase : int = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(__a , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , __a )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
def prepare_images():
    """Load two small test images from the fixtures dataset."""
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")
    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])
    return [image1, image2]
@require_vision
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
__lowercase : int = prepare_images()
# test non-batched
__lowercase : List[str] = image_processing(images[0] , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1024) )
__lowercase : Tuple = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , __a )
# test batched
__lowercase : List[str] = image_processing(__a , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1024) )
__lowercase : Optional[Any] = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , __a )
| 149
| 1
|
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
    """RagRetriever variant for torch.distributed training: retrieval runs only
    on the main worker; queries are gathered to it and the results scattered
    back over a dedicated gloo process group."""

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, np.ndarray, List[dict]]:
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            doc_ids, retrieved_doc_embeds = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            doc_ids, retrieved_doc_embeds = torch.tensor(doc_ids), torch.tensor(retrieved_doc_embeds)
            scatter_ids = self._chunk_tensor(doc_ids, n_queries)
            scatter_vectors = self._chunk_tensor(retrieved_doc_embeds, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
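# Illustrative sketch (not in the original): the scatter step above sends each
# rank a contiguous block of n_queries results.  Chunking a gathered sequence
# per rank looks like this (hypothetical helper; the real _chunk_tensor lives
# in RagRetriever):
def _chunk_per_rank_sketch(gathered, n_queries_per_rank):
    return [gathered[i : i + n_queries_per_rank] for i in range(0, len(gathered), n_queries_per_rank)]


assert _chunk_per_rank_sketch(list(range(6)), 2) == [[0, 1], [2, 3], [4, 5]]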
| 719
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
},
"""tokenizer_file""": {
"""google/bigbird-roberta-base""": (
"""https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"""
),
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/bigbird-roberta-base""": 4096,
"""google/bigbird-roberta-large""": 4096,
"""google/bigbird-base-trivia-itc""": 4096,
}
__UpperCAmelCase = """▁"""
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    """Fast BigBird tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """[CLS] A [SEP] for single sequences, [CLS] A [SEP] B [SEP] for pairs."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
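def _special_tokens_layout_sketch(ids_a, ids_b=None, cls_id=-1, sep_id=-2):
    """Illustrative helper (not in the original) mirroring the layouts produced
    by build_inputs_with_special_tokens above: [CLS] A [SEP] for single
    sequences and [CLS] A [SEP] B [SEP] for pairs."""
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]


assert _special_tokens_layout_sketch([5, 6]) == [-1, 5, 6, -2]
assert _special_tokens_layout_sketch([5], [7]) == [-1, 5, -2, 7, -2]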
| 79
| 0
|
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
lowerCAmelCase__ : str =logging.getLogger(__name__)
@dataclass
class UpperCAmelCase_ :
'''simple docstring'''
UpperCamelCase__ : str
UpperCamelCase__ : List[str]
UpperCamelCase__ : Optional[List[str]]
@dataclass
class UpperCAmelCase_ :
'''simple docstring'''
UpperCamelCase__ : List[int]
UpperCamelCase__ : List[int]
UpperCamelCase__ : Optional[List[int]] = None
UpperCamelCase__ : Optional[List[int]] = None
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = '''train'''
UpperCamelCase__ : Tuple = '''dev'''
UpperCamelCase__ : List[Any] = '''test'''
class UpperCAmelCase_ :
'''simple docstring'''
@staticmethod
def _A ( _A , _A ):
'''simple docstring'''
raise NotImplementedError
@staticmethod
def _A ( _A ):
'''simple docstring'''
raise NotImplementedError
@staticmethod
def _A ( _A , _A , _A , _A , _A=False , _A="[CLS]" , _A=1 , _A="[SEP]" , _A=False , _A=False , _A=0 , _A=0 , _A=-100 , _A=0 , _A=True , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {label: i for i, label in enumerate(_A )}
__SCREAMING_SNAKE_CASE = []
for ex_index, example in enumerate(_A ):
if ex_index % 10_000 == 0:
logger.info('Writing example %d of %d' , _A , len(_A ) )
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
for word, label in zip(example.words , example.labels ):
__SCREAMING_SNAKE_CASE = tokenizer.tokenize(_A )
# bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
if len(_A ) > 0:
tokens.extend(_A )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_A ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
__SCREAMING_SNAKE_CASE = tokenizer.num_special_tokens_to_add()
if len(_A ) > max_seq_length - special_tokens_count:
__SCREAMING_SNAKE_CASE = tokens[: (max_seq_length - special_tokens_count)]
__SCREAMING_SNAKE_CASE = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
__SCREAMING_SNAKE_CASE = [sequence_a_segment_id] * len(_A )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
__SCREAMING_SNAKE_CASE = [cls_token] + tokens
__SCREAMING_SNAKE_CASE = [pad_token_label_id] + label_ids
__SCREAMING_SNAKE_CASE = [cls_token_segment_id] + segment_ids
__SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(_A )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
__SCREAMING_SNAKE_CASE = [1 if mask_padding_with_zero else 0] * len(_A )
# Zero-pad up to the sequence length.
__SCREAMING_SNAKE_CASE = max_seq_length - len(_A )
if pad_on_left:
__SCREAMING_SNAKE_CASE = ([pad_token] * padding_length) + input_ids
__SCREAMING_SNAKE_CASE = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
__SCREAMING_SNAKE_CASE = ([pad_token_segment_id] * padding_length) + segment_ids
__SCREAMING_SNAKE_CASE = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(_A ) == max_seq_length
assert len(_A ) == max_seq_length
assert len(_A ) == max_seq_length
assert len(_A ) == max_seq_length
if ex_index < 5:
logger.info('*** Example ***' )
logger.info('guid: %s' , example.guid )
logger.info('tokens: %s' , ' '.join([str(_A ) for x in tokens] ) )
logger.info('input_ids: %s' , ' '.join([str(_A ) for x in input_ids] ) )
logger.info('input_mask: %s' , ' '.join([str(_A ) for x in input_mask] ) )
logger.info('segment_ids: %s' , ' '.join([str(_A ) for x in segment_ids] ) )
logger.info('label_ids: %s' , ' '.join([str(_A ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
__SCREAMING_SNAKE_CASE = None
features.append(
InputFeatures(
input_ids=_A , attention_mask=_A , token_type_ids=_A , label_ids=_A ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ : List[InputFeatures]
UpperCamelCase__ : int = nn.CrossEntropyLoss().ignore_index
def __init__( self , _A , _A , _A , _A , _A , _A = None , _A=False , _A = Split.train , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = os.path.join(
_A , 'cached_{}_{}_{}'.format(mode.value , tokenizer.__class__.__name__ , str(_A ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__SCREAMING_SNAKE_CASE = cached_features_file + '.lock'
with FileLock(_A ):
if os.path.exists(_A ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
__SCREAMING_SNAKE_CASE = torch.load(_A )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
__SCREAMING_SNAKE_CASE = token_classification_task.read_examples_from_file(_A , _A )
# TODO clean up all this to leverage built-in features of tokenizers
__SCREAMING_SNAKE_CASE = token_classification_task.convert_examples_to_features(
_A , _A , _A , _A , cls_token_at_end=bool(model_type in ['xlnet'] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['xlnet'] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_A , pad_on_left=bool(tokenizer.padding_side == 'left' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f"""Saving features into cached file {cached_features_file}""" )
torch.save(self.features , _A )
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self , _A ):
'''simple docstring'''
return self.features[i]
if is_tf_available():
import tensorflow as tf
class UpperCAmelCase_ :
'''simple docstring'''
UpperCamelCase__ : List[InputFeatures]
UpperCamelCase__ : int = -100
def __init__( self , _A , _A , _A , _A , _A , _A = None , _A=False , _A = Split.train , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = token_classification_task.read_examples_from_file(_A , _A )
# TODO clean up all this to leverage built-in features of tokenizers
__SCREAMING_SNAKE_CASE = token_classification_task.convert_examples_to_features(
_A , _A , _A , _A , cls_token_at_end=bool(model_type in ['xlnet'] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['xlnet'] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_A , pad_on_left=bool(tokenizer.padding_side == 'left' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
__SCREAMING_SNAKE_CASE = tf.data.Dataset.from_generator(
_A , ({'input_ids': tf.intaa, 'attention_mask': tf.intaa}, tf.intaa) , (
{'input_ids': tf.TensorShape([None] ), 'attention_mask': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
__SCREAMING_SNAKE_CASE = tf.data.Dataset.from_generator(
_A , ({'input_ids': tf.intaa, 'attention_mask': tf.intaa, 'token_type_ids': tf.intaa}, tf.intaa) , (
{
'input_ids': tf.TensorShape([None] ),
'attention_mask': tf.TensorShape([None] ),
'token_type_ids': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self , _A ):
'''simple docstring'''
return self.features[i]
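# Illustrative sketch (not part of the original module): the core labelling
# rule above in isolation: the real label goes on a word's first sub-token,
# pad_token_label_id (-100, ignored by the loss) on the remaining pieces.
# The lambda below is a hypothetical stand-in for a word-piece tokenizer.
def _align_labels_sketch(words, labels, tokenize, pad_token_label_id=-100):
    tokens, label_ids = [], []
    for word, label in zip(words, labels):
        pieces = tokenize(word)
        if len(pieces) > 0:
            tokens.extend(pieces)
            label_ids.extend([label] + [pad_token_label_id] * (len(pieces) - 1))
    return tokens, label_ids


_toks, _ids = _align_labels_sketch(["New", "York"], [3, 4], tokenize=lambda w: [w[:2], "##" + w[2:]])
assert _toks == ["Ne", "##w", "Yo", "##rk"]
assert _ids == [3, -100, 4, -100]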
| 148
|
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    """Sort a list of integers in place using pigeonhole sort."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join([str(i) for i in a]))


if __name__ == "__main__":
    main()
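# Illustrative check (not in the original): pigeonhole sort runs in
# O(n + range) time with O(range) extra space, and agrees with sorted().
import random as _random

_sample = [_random.randint(-50, 50) for _ in range(100)]
_expected = sorted(_sample)
pigeonhole_sort(_sample)
assert _sample == _expected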
| 148
| 1
|
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def _A ( __magic_name__ ):
random.seed(__magic_name__ )
np.random.seed(__magic_name__ )
torch.manual_seed(__magic_name__ )
torch.cuda.manual_seed_all(__magic_name__ )
# ^^ safe to call this function even if cuda is not available
class lowerCAmelCase :
def __init__( self :str , _lowercase :Iterable[torch.nn.Parameter] , _lowercase :float = 0.9999 , _lowercase :float = 0.0 , _lowercase :int = 0 , _lowercase :bool = False , _lowercase :Union[float, int] = 1.0 , _lowercase :Union[float, int] = 2 / 3 , _lowercase :Optional[Any] = None , _lowercase :Dict[str, Any] = None , **_lowercase :Any , ):
'''simple docstring'''
if isinstance(_lowercase , torch.nn.Module ):
lowercase__ = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`" , "1.0.0" , _lowercase , standard_warn=_lowercase , )
lowercase__ = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
lowercase__ = True
if kwargs.get("max_value" , _lowercase ) is not None:
lowercase__ = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value" , "1.0.0" , _lowercase , standard_warn=_lowercase )
lowercase__ = kwargs["max_value"]
if kwargs.get("min_value" , _lowercase ) is not None:
lowercase__ = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value" , "1.0.0" , _lowercase , standard_warn=_lowercase )
lowercase__ = kwargs["min_value"]
lowercase__ = list(_lowercase )
lowercase__ = [p.clone().detach() for p in parameters]
if kwargs.get("device" , _lowercase ) is not None:
lowercase__ = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device" , "1.0.0" , _lowercase , standard_warn=_lowercase )
self.to(device=kwargs["device"] )
lowercase__ = None
lowercase__ = decay
lowercase__ = min_decay
lowercase__ = update_after_step
lowercase__ = use_ema_warmup
lowercase__ = inv_gamma
lowercase__ = power
lowercase__ = 0
lowercase__ = None # set in `step()`
lowercase__ = model_cls
lowercase__ = model_config
@classmethod
def UpperCAmelCase ( cls :List[Any] , _lowercase :Union[str, Any] , _lowercase :Union[str, Any] ):
'''simple docstring'''
lowercase__ , lowercase__ = model_cls.load_config(_lowercase , return_unused_kwargs=_lowercase )
lowercase__ = model_cls.from_pretrained(_lowercase )
lowercase__ = cls(model.parameters() , model_cls=_lowercase , model_config=model.config )
ema_model.load_state_dict(_lowercase )
return ema_model
def UpperCAmelCase ( self :Optional[int] , _lowercase :str ):
'''simple docstring'''
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
lowercase__ = self.model_cls.from_config(self.model_config )
lowercase__ = self.state_dict()
state_dict.pop("shadow_params" , _lowercase )
model.register_to_config(**_lowercase )
self.copy_to(model.parameters() )
model.save_pretrained(_lowercase )
def UpperCAmelCase ( self :Optional[Any] , _lowercase :int ):
'''simple docstring'''
lowercase__ = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
lowercase__ = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
lowercase__ = (1 + step) / (10 + step)
lowercase__ = min(_lowercase , self.decay )
# make sure decay is not smaller than min_decay
lowercase__ = max(_lowercase , self.min_decay )
return cur_decay_value
@torch.no_grad()
def UpperCAmelCase ( self :List[Any] , _lowercase :Iterable[torch.nn.Parameter] ):
'''simple docstring'''
if isinstance(_lowercase , torch.nn.Module ):
lowercase__ = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`" , "1.0.0" , _lowercase , standard_warn=_lowercase , )
lowercase__ = parameters.parameters()
lowercase__ = list(_lowercase )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
lowercase__ = self.get_decay(self.optimization_step )
lowercase__ = decay
lowercase__ = 1 - decay
lowercase__ = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , _lowercase ):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
lowercase__ = deepspeed.zero.GatheredParameters(_lowercase , modifier_rank=_lowercase )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(_lowercase )
def UpperCAmelCase ( self :str , _lowercase :Iterable[torch.nn.Parameter] ):
'''simple docstring'''
lowercase__ = list(_lowercase )
for s_param, param in zip(self.shadow_params , _lowercase ):
param.data.copy_(s_param.to(param.device ).data )
def UpperCAmelCase ( self :List[Any] , _lowercase :List[Any]=None , _lowercase :Any=None ):
'''simple docstring'''
lowercase__ = [
p.to(device=_lowercase , dtype=_lowercase ) if p.is_floating_point() else p.to(device=_lowercase )
for p in self.shadow_params
]
def UpperCAmelCase ( self :Dict ):
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def UpperCAmelCase ( self :Optional[Any] , _lowercase :Iterable[torch.nn.Parameter] ):
'''simple docstring'''
lowercase__ = [param.detach().cpu().clone() for param in parameters]
def UpperCAmelCase ( self :List[str] , _lowercase :Iterable[torch.nn.Parameter] ):
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params , _lowercase ):
param.data.copy_(c_param.data )
# Better memory-wise.
lowercase__ = None
def UpperCAmelCase ( self :Optional[Any] , _lowercase :dict ):
'''simple docstring'''
lowercase__ = copy.deepcopy(_lowercase )
lowercase__ = state_dict.get("decay" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
lowercase__ = state_dict.get("min_decay" , self.min_decay )
if not isinstance(self.min_decay , _lowercase ):
raise ValueError("Invalid min_decay" )
lowercase__ = state_dict.get("optimization_step" , self.optimization_step )
if not isinstance(self.optimization_step , _lowercase ):
raise ValueError("Invalid optimization_step" )
lowercase__ = state_dict.get("update_after_step" , self.update_after_step )
if not isinstance(self.update_after_step , _lowercase ):
raise ValueError("Invalid update_after_step" )
lowercase__ = state_dict.get("use_ema_warmup" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , _lowercase ):
raise ValueError("Invalid use_ema_warmup" )
lowercase__ = state_dict.get("inv_gamma" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("Invalid inv_gamma" )
lowercase__ = state_dict.get("power" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("Invalid power" )
lowercase__ = state_dict.get("shadow_params" , _lowercase )
if shadow_params is not None:
lowercase__ = shadow_params
if not isinstance(self.shadow_params , _lowercase ):
raise ValueError("shadow_params must be a list" )
            if not all(isinstance(p , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" )
| 713
|
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
_snake_case = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def _A ( __magic_name__ , __magic_name__ , __magic_name__=None ):
if rng is None:
lowercase__ = random.Random()
lowercase__ = 1
for dim in shape:
total_dims *= dim
lowercase__ = []
for _ in range(__magic_name__ ):
values.append(rng.randint(0 , vocab_size - 1 ) )
lowercase__ = np.array(__magic_name__ , dtype=jnp.intaa ).reshape(__magic_name__ )
return output
def _A ( __magic_name__ , __magic_name__=None ):
lowercase__ = ids_tensor(__magic_name__ , vocab_size=2 , rng=__magic_name__ )
# make sure that at least one token is attended to for each batch
lowercase__ = 1
return attn_mask
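# --- illustrative sketch (not part of the original test file) ---
# What the two helpers above produce, shown with plain numpy: `ids_tensor`
# fills a shape with random token ids in [0, vocab_size), and
# `random_attention_mask` draws a random 0/1 mask whose last column is forced
# to 1 so each row attends to at least one token. Names here are illustrative.
def _demo_test_helpers() -> None:
    import numpy as np

    rng = np.random.default_rng(0)
    batch_size, seq_len, vocab_size = 2, 5, 99
    ids = rng.integers(0, vocab_size, size=(batch_size, seq_len))
    mask = rng.integers(0, 2, size=(batch_size, seq_len))
    mask[:, -1] = 1  # mirrors the forced-to-1 last column in the helper above
    assert ids.shape == (batch_size, seq_len) and mask[:, -1].all()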
@require_flax
class lowerCAmelCase :
__lowerCamelCase = None
__lowerCamelCase = ()
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
lowercase__ = 2
lowercase__ = inputs["input_ids"].shape[-1] // 2
lowercase__ = inputs["input_ids"][:max_batch_size, :sequence_length]
lowercase__ = jnp.ones_like(_lowercase )
lowercase__ = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
lowercase__ = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
lowercase__ = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
lowercase__ = False
lowercase__ = max_length
lowercase__ = 0
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowercase__ = getattr(_lowercase , _lowercase )
lowercase__ = pt_model_class(_lowercase ).eval()
lowercase__ = load_flax_weights_in_pytorch_model(_lowercase , flax_model.params )
lowercase__ = flax_model.generate(_lowercase ).sequences
lowercase__ = pt_model.generate(torch.tensor(_lowercase , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
lowercase__ = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
lowercase__ = False
lowercase__ = max_length
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
lowercase__ = jit(model.generate )
lowercase__ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
lowercase__ = True
lowercase__ = max_length
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
lowercase__ = jit(model.generate )
lowercase__ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
lowercase__ = False
lowercase__ = max_length
lowercase__ = 2
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
lowercase__ = jit(model.generate )
lowercase__ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
lowercase__ = False
lowercase__ = max_length
lowercase__ = 2
lowercase__ = 2
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def UpperCAmelCase ( self :int ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
lowercase__ = True
lowercase__ = max_length
lowercase__ = 0.8
lowercase__ = 10
lowercase__ = 0.3
lowercase__ = 1
lowercase__ = 8
lowercase__ = 9
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
lowercase__ = jit(model.generate )
lowercase__ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self :Union[str, Any] ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
lowercase__ = max_length
lowercase__ = 1
lowercase__ = 8
lowercase__ = 9
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
lowercase__ = jit(model.generate )
lowercase__ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
lowercase__ = max_length
lowercase__ = 2
lowercase__ = 1
lowercase__ = 8
lowercase__ = 9
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model.generate(_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
lowercase__ = jit(model.generate )
lowercase__ = jit_generate(_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
# pad attention mask on the left
lowercase__ = attention_mask.at[(0, 0)].set(0 )
lowercase__ = False
lowercase__ = max_length
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model.generate(_lowercase , attention_mask=_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
lowercase__ = jit(model.generate )
lowercase__ = jit_generate(_lowercase , attention_mask=_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
# pad attention mask on the left
lowercase__ = attention_mask.at[(0, 0)].set(0 )
lowercase__ = True
lowercase__ = max_length
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model.generate(_lowercase , attention_mask=_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
lowercase__ = jit(model.generate )
lowercase__ = jit_generate(_lowercase , attention_mask=_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self._get_input_ids_and_config()
# pad attention mask on the left
lowercase__ = attention_mask.at[(0, 0)].set(0 )
lowercase__ = 2
lowercase__ = max_length
for model_class in self.all_generative_model_classes:
lowercase__ = model_class(_lowercase )
lowercase__ = model.generate(_lowercase , attention_mask=_lowercase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _lowercase )
lowercase__ = jit(model.generate )
lowercase__ = jit_generate(_lowercase , attention_mask=_lowercase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class lowerCAmelCase ( unittest.TestCase ):
def UpperCAmelCase ( self :int ):
'''simple docstring'''
lowercase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" )
lowercase__ = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
lowercase__ = "Hello world"
lowercase__ = tokenizer(_lowercase , return_tensors="np" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(_lowercase , "do_samples" ):
model.generate(_lowercase , do_samples=_lowercase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(_lowercase , "foo" ):
lowercase__ = {"foo": "bar"}
model.generate(_lowercase , **_lowercase )
| 611
| 0
|
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def A ( lowercase__ : str , lowercase__ : complex , lowercase__ : str = "x" , lowercase__ : float = 10**-10 , lowercase__ : int = 1 , ) -> complex:
UpperCamelCase__ :Optional[int] = symbols(lowercase__ )
UpperCamelCase__ :Dict = lambdify(lowercase__ , lowercase__ )
UpperCamelCase__ :Any = lambdify(lowercase__ , diff(lowercase__ , lowercase__ ) )
UpperCamelCase__ :List[str] = starting_point
while True:
if diff_function(lowercase__ ) != 0:
UpperCamelCase__ :List[Any] = prev_guess - multiplicity * func(lowercase__ ) / diff_function(
lowercase__ )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
UpperCamelCase__ :Optional[Any] = next_guess
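# --- illustrative sketch (not part of the original file) ---
# The same iteration, x_next = x - multiplicity * f(x) / f'(x), written for
# plain Python callables so it can be checked without sympy. The function name
# and the `max_iter` safety guard are additions for illustration.
def newton_raphson_plain(func, deriv, start, precision=10**-10, multiplicity=1, max_iter=10_000):
    x = start
    for _ in range(max_iter):
        slope = deriv(x)
        if slope == 0:
            raise ZeroDivisionError("Could not find root")
        x_next = x - multiplicity * func(x) / slope
        # same convergence test as above: compare consecutive guesses
        if abs(x_next - x) < precision:
            return x_next
        x = x_next
    raise ArithmeticError("Did not converge within max_iter steps")

# e.g. newton_raphson_plain(lambda x: x * x - 5, lambda x: 2 * x, 2) ~= 5 ** 0.5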
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
# Find fourth Root of 5
    print(f'''The root of x**4 - 5 = 0 is {newton_raphson('x**4 - 5', 0.4 + 5j)}''')
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f'''{newton_raphson('log(y) - 1', 2, variable='y')}''',
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f'''{newton_raphson('exp(x) - 1', 10, precision=0.005)}''',
)
# Find root of cos(x)
print(f'''The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}''')
| 45
|
"""simple docstring"""
from __future__ import annotations
SCREAMING_SNAKE_CASE_ = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
SCREAMING_SNAKE_CASE_ = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def lowercase__ ( lowerCAmelCase : list[float] ) -> list[float]:
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = len(lowerCAmelCase )
for i in range(lowerCAmelCase ):
UpperCAmelCase = -1
for j in range(i + 1 , lowerCAmelCase ):
if arr[i] < arr[j]:
UpperCAmelCase = arr[j]
break
result.append(lowerCAmelCase )
return result
def lowercase__ ( lowerCAmelCase : list[float] ) -> list[float]:
"""simple docstring"""
UpperCAmelCase = []
for i, outer in enumerate(lowerCAmelCase ):
UpperCAmelCase = -1
for inner in arr[i + 1 :]:
if outer < inner:
UpperCAmelCase = inner
break
result.append(lowerCAmelCase )
return result
def lowercase__ ( lowerCAmelCase : list[float] ) -> list[float]:
"""simple docstring"""
UpperCAmelCase = len(lowerCAmelCase )
UpperCAmelCase = []
UpperCAmelCase = [-1] * arr_size
for index in reversed(range(lowerCAmelCase ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
UpperCAmelCase = stack[-1]
stack.append(arr[index] )
return result
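# --- illustrative sketch (not part of the original file) ---
# A hand-traced check of the monotonic-stack version above. Scanning from the
# right, the stack only keeps candidates larger than the current element, so
# every element is pushed and popped at most once (O(n) overall). The call
# below uses the original function name, as the `__main__` block does.
def _demo_next_greatest() -> None:
    sample = [2.0, 1.0, 3.0]
    # index 2: stack []        -> result[2] = -1,  push 3.0 -> stack [3.0]
    # index 1: 3.0 > 1.0       -> result[1] = 3.0, push 1.0 -> stack [3.0, 1.0]
    # index 0: pop 1.0 (<= 2.0), 3.0 stays -> result[0] = 3.0, push 2.0
    assert next_greatest_element(sample) == [3.0, 3.0, -1]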
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
SCREAMING_SNAKE_CASE_ = (
'''from __main__ import arr, next_greatest_element_slow, '''
'''next_greatest_element_fast, next_greatest_element'''
)
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
)
| 373
| 0
|
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def __UpperCAmelCase ( self : int) -> str:
"""simple docstring"""
torch.manual_seed(0)
_UpperCamelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def __UpperCAmelCase ( self : List[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = self.dummy_uncond_unet
_UpperCamelCase = KarrasVeScheduler()
_UpperCamelCase = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_)
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = pipe(num_inference_steps=2 , generator=lowercase_ , output_type="numpy").images
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = pipe(num_inference_steps=2 , generator=lowercase_ , output_type="numpy" , return_dict=lowercase_)[0]
_UpperCamelCase = image[0, -3:, -3:, -1]
_UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
_UpperCamelCase = "google/ncsnpp-celebahq-256"
_UpperCamelCase = UNetaDModel.from_pretrained(lowercase_)
_UpperCamelCase = KarrasVeScheduler()
_UpperCamelCase = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_)
pipe.to(lowercase_)
pipe.set_progress_bar_config(disable=lowercase_)
_UpperCamelCase = torch.manual_seed(0)
_UpperCamelCase = pipe(num_inference_steps=20 , generator=lowercase_ , output_type="numpy").images
_UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_UpperCamelCase = np.array([0.5_78, 0.58_11, 0.59_24, 0.58_09, 0.5_87, 0.58_86, 0.58_61, 0.58_02, 0.5_86])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 82
|
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
lowerCamelCase__ = input('''Enter image url: ''').strip()
print(F"Downloading image from {url} ...")
lowerCamelCase__ = BeautifulSoup(requests.get(url).content, '''html.parser''')
# The image URL is in the content field of the first meta tag with property og:image
lowerCamelCase__ = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
lowerCamelCase__ = requests.get(image_url).content
lowerCamelCase__ = F"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
with open(file_name, '''wb''') as fp:
fp.write(image_data)
print(F"Done. Image saved to disk as {file_name}.")
| 82
| 1
|
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
lowerCamelCase__ = datasets.logging.get_logger(__name__)
lowerCamelCase__ = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
lowerCamelCase__ = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
lowerCamelCase__ = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n"
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=False ,lowercase_=False ,lowercase_=True ,lowercase_=False ,lowercase_="dummy_doc" ) -> str:
"""simple docstring"""
_UpperCamelCase : int = {doc: key_lines}
_UpperCamelCase : int = {doc: sys_lines}
_UpperCamelCase : Optional[Any] = {}
_UpperCamelCase : List[Any] = 0
_UpperCamelCase : Dict = 0
_UpperCamelCase : Dict = 0
_UpperCamelCase : List[str] = 0
_UpperCamelCase : List[Any] = 0
_UpperCamelCase : Tuple = 0
_UpperCamelCase, _UpperCamelCase : str = reader.get_doc_mentions(SCREAMING_SNAKE_CASE__ ,key_doc_lines[doc] ,SCREAMING_SNAKE_CASE__ )
key_singletons_num += singletons_num
if NP_only or min_span:
_UpperCamelCase : Union[str, Any] = reader.set_annotated_parse_trees(SCREAMING_SNAKE_CASE__ ,key_doc_lines[doc] ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
_UpperCamelCase, _UpperCamelCase : Dict = reader.get_doc_mentions(SCREAMING_SNAKE_CASE__ ,sys_doc_lines[doc] ,SCREAMING_SNAKE_CASE__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
_UpperCamelCase : Any = reader.set_annotated_parse_trees(SCREAMING_SNAKE_CASE__ ,key_doc_lines[doc] ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
if remove_nested:
_UpperCamelCase, _UpperCamelCase : List[Any] = reader.remove_nested_coref_mentions(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
_UpperCamelCase, _UpperCamelCase : Any = reader.remove_nested_coref_mentions(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
_UpperCamelCase : Tuple = reader.get_mention_assignments(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
_UpperCamelCase : Union[str, Any] = reader.get_mention_assignments(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
_UpperCamelCase : str = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"Number of removed nested coreferring mentions in the key "
F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' )
logger.info(
"Number of resulting singleton clusters in the key "
F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' )
if not keep_singletons:
logger.info(
F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
"files, respectively" )
return doc_coref_infos
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> int:
"""simple docstring"""
_UpperCamelCase : Optional[int] = get_coref_infos(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
_UpperCamelCase : Tuple = {}
_UpperCamelCase : Tuple = 0
_UpperCamelCase : List[str] = 0
for name, metric in metrics:
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase : Tuple = evaluator.evaluate_documents(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa} )
logger.info(
name.ljust(10 ) ,F'''Recall: {recall * 100:.2f}''' ,F''' Precision: {precision * 100:.2f}''' ,F''' F1: {fa * 100:.2f}''' ,)
if conll_subparts_num == 3:
_UpperCamelCase : Union[str, Any] = (conll / 3) * 100
logger.info(F'''CoNLL score: {conll:.2f}''' )
output_scores.update({"conll_score": conll} )
return output_scores
def lowercase__ ( lowercase_ ) -> Tuple:
"""simple docstring"""
_UpperCamelCase : List[str] = False
for line in key_lines:
if not line.startswith("#" ):
if len(line.split() ) > 6:
_UpperCamelCase : int = line.split()[5]
if not parse_col == "-":
_UpperCamelCase : Union[str, Any] = True
break
else:
break
return has_gold_parse
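# --- illustrative sketch (not part of the original metric) ---
# Per the format description above, only columns 4, 5, 6 and the last column
# of a CoNLL line are consumed (word, POS, parse bit, coreference annotation).
# A minimal extractor under that assumption; the helper name is illustrative.
def conll_columns(line: str) -> tuple:
    cols = line.split()
    if len(cols) < 7:
        raise ValueError("Not a full CoNLL annotation line")
    # 0-based indices 3, 4, 5 are columns 4, 5, 6 of the format description
    return cols[3], cols[4], cols[5], cols[-1]

# e.g. on the sample line from the docstring above, conll_columns(...) yields
# ("you", "PRP", "(NP*)", "(116)").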
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Sequence(datasets.Value("string" ) ),
} ) , codebase_urls=["https://github.com/ns-moosavi/coval"] , reference_urls=[
"https://github.com/ns-moosavi/coval",
"https://www.aclweb.org/anthology/P16-1060",
"http://www.conll.cemantix.org/2012/data.html",
] , )
def __SCREAMING_SNAKE_CASE ( self : str , __a : int , __a : Union[str, Any] , __a : Optional[Any]=True , __a : int=False , __a : Optional[Any]=False , __a : int=False ) -> Tuple:
_UpperCamelCase : List[str] = [
("mentions", evaluator.mentions),
("muc", evaluator.muc),
("bcub", evaluator.b_cubed),
("ceafe", evaluator.ceafe),
("lea", evaluator.lea),
]
if min_span:
_UpperCamelCase : Any = util.check_gold_parse_annotation(_UpperCamelCase )
if not has_gold_parse:
raise NotImplementedError("References should have gold parse annotation to use \'min_span\'." )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
_UpperCamelCase : Optional[int] = evaluate(
key_lines=_UpperCamelCase , sys_lines=_UpperCamelCase , metrics=_UpperCamelCase , NP_only=_UpperCamelCase , remove_nested=_UpperCamelCase , keep_singletons=_UpperCamelCase , min_span=_UpperCamelCase , )
return score
| 624
|
from __future__ import annotations
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
if not nums:
raise ValueError('''List is empty''' )
return sum(SCREAMING_SNAKE_CASE__ ) / len(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39
| 0
|
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = BarthezTokenizer
lowerCAmelCase = BarthezTokenizerFast
lowerCAmelCase = True
lowerCAmelCase = True
def __a ( self : int ):
super().setUp()
A = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=_lowercase )
A = tokenizer
def __a ( self : Any ):
A = '<pad>'
A = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase )
def __a ( self : List[str] ):
A = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(_lowercase ) , 101_122 )
def __a ( self : List[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 101_122 )
@require_torch
def __a ( self : str ):
A = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
A = [0, 57, 3_018, 70_307, 91, 2]
A = self.tokenizer(
_lowercase , max_length=len(_lowercase ) , padding=_lowercase , truncation=_lowercase , return_tensors='pt' )
self.assertIsInstance(_lowercase , _lowercase )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
A = batch.input_ids.tolist()[0]
self.assertListEqual(_lowercase , _lowercase )
def __a ( self : Union[str, Any] ):
if not self.test_rust_tokenizer:
return
A = self.get_tokenizer()
A = self.get_rust_tokenizer()
A = 'I was born in 92000, and this is falsé.'
A = tokenizer.tokenize(_lowercase )
A = rust_tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
A = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
A = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
A = self.get_rust_tokenizer()
A = tokenizer.encode(_lowercase )
A = rust_tokenizer.encode(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
@slow
def __a ( self : int ):
# fmt: off
A = {'input_ids': [[0, 490, 14_328, 4_507, 354, 47, 43_669, 95, 25, 78_117, 20_215, 19_779, 190, 22, 400, 4, 35_343, 80_310, 603, 86, 24_937, 105, 33_438, 94_762, 196, 39_642, 7, 15, 15_933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10_534, 87, 25, 66, 3_358, 196, 55_289, 8, 82_961, 81, 2_204, 75_203, 7, 15, 763, 12_956, 216, 178, 14_328, 9_595, 1_377, 69_693, 7, 448, 71_021, 196, 18_106, 1_437, 13_974, 108, 9_083, 4, 49_315, 7, 39, 86, 1_326, 2_793, 46_333, 4, 448, 196, 74_588, 7, 49_315, 7, 39, 21, 822, 38_470, 74, 21, 66_723, 62_480, 8, 22_050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
A = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=_lowercase , )
| 91
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class lowerCamelCase__ :
@staticmethod
def __a ( *_lowercase : int , **_lowercase : Optional[int] ):
pass
@is_pipeline_test
@require_vision
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
lowerCAmelCase = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def __a ( self : int , _lowercase : Optional[Any] , _lowercase : int , _lowercase : Union[str, Any] ):
A = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
A = [
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
]
return object_detector, examples
def __a ( self : Tuple , _lowercase : int , _lowercase : Tuple ):
A = object_detector(examples[0] , threshold=0.0 )
A = len(_lowercase )
self.assertGreater(_lowercase , 0 )
self.assertEqual(
_lowercase , [
{
'score': ANY(_lowercase ),
'label': ANY(_lowercase ),
'box': {'xmin': ANY(_lowercase ), 'ymin': ANY(_lowercase ), 'xmax': ANY(_lowercase ), 'ymax': ANY(_lowercase )},
}
for i in range(_lowercase )
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def __a ( self : Optional[int] ):
pass
@require_torch
def __a ( self : Optional[Any] ):
A = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection' )
A = object_detector(
'./tests/fixtures/tests_samples/COCO/000000039769.png' , candidate_labels=['cat', 'remote', 'couch'] , threshold=0.6_4 , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{'score': 0.7_2_3_5, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7_2_1_8, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7_1_8_4, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.6_7_4_8, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_6_5_6, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_6_1_4, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_4_5_6, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
{'score': 0.6_4_2, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
{'score': 0.6_4_1_9, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
] , )
A = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] , threshold=0.6_4 , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
[
{'score': 0.7_2_3_5, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7_2_1_8, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.7_1_8_4, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.6_7_4_8, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_6_5_6, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_6_1_4, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.6_4_5_6, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
{'score': 0.6_4_2, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
{'score': 0.6_4_1_9, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
]
] , )
@require_torch
@slow
def __a ( self : List[str] ):
A = pipeline('zero-shot-object-detection' )
A = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.1_4_7_4, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.1_2_0_8, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
] , )
A = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
[
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.1_4_7_4, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.1_2_0_8, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
],
[
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.1_4_7_4, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.1_2_0_8, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
],
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF' )
def __a ( self : Optional[int] ):
pass
@require_torch
@slow
def __a ( self : Optional[Any] ):
A = 0.2
A = pipeline('zero-shot-object-detection' )
A = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , threshold=_lowercase , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
] , )
@require_torch
@slow
def __a ( self : Optional[int] ):
A = 2
A = pipeline('zero-shot-object-detection' )
A = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , top_k=_lowercase , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
] , )
| 91
| 1
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
_snake_case : Optional[Any] = 8
def snake_case_ (UpperCamelCase : List[Any] , UpperCamelCase : Dict=BITS ):
'''simple docstring'''
_a = x.device
_a = (x * 255).int().clamp(0 , 255 )
_a = 2 ** torch.arange(bits - 1 , -1 , -1 , device=UpperCamelCase )
_a = rearrange(UpperCamelCase , '''d -> d 1 1''' )
_a = rearrange(UpperCamelCase , '''b c h w -> b c 1 h w''' )
_a = ((x & mask) != 0).float()
_a = rearrange(UpperCamelCase , '''b c d h w -> b (c d) h w''' )
_a = bits * 2 - 1
return bits
def snake_case_ (UpperCamelCase : List[Any] , UpperCamelCase : Any=BITS ):
'''simple docstring'''
_a = x.device
_a = (x > 0).int()
_a = 2 ** torch.arange(bits - 1 , -1 , -1 , device=UpperCamelCase , dtype=torch.intaa )
_a = rearrange(UpperCamelCase , '''d -> d 1 1''' )
_a = rearrange(UpperCamelCase , '''b (c d) h w -> b c d h w''' , d=8 )
_a = reduce(x * mask , '''b c d h w -> b c h w''' , '''sum''' )
return (dec / 255).clamp(0.0 , 1.0 )
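# --- illustrative sketch (not part of the original file) ---
# The two converters above are approximate inverses: the encoder quantises
# pixel values in [0, 1] to 8 bit-planes scaled to {-1, 1}, and the decoder
# packs them back, so a round trip is exact up to 1/255 quantisation error.
# The call names follow the usage at the bottom of this file (both obfuscated
# definitions above read `snake_case_`).
def _bit_round_trip_check() -> None:
    x = torch.rand(2, 3, 8, 8)
    recovered = bits_to_decimal(decimal_to_bits(x))
    assert (recovered - x).abs().max() <= 1 / 255 + 1e-6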
def snake_case_ (self : Union[str, Any] , UpperCamelCase : torch.FloatTensor , UpperCamelCase : int , UpperCamelCase : torch.FloatTensor , UpperCamelCase : float = 0.0 , UpperCamelCase : bool = True , UpperCamelCase : Any=None , UpperCamelCase : bool = True , ):
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
_a = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
_a = self.alphas_cumprod[timestep]
_a = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
_a = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_a = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
_a = self.bit_scale
if self.config.clip_sample:
_a = torch.clamp(UpperCamelCase , -scale , UpperCamelCase )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
_a = self._get_variance(UpperCamelCase , UpperCamelCase )
_a = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
_a = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_a = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_a = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
_a = model_output.device if torch.is_tensor(UpperCamelCase ) else '''cpu'''
_a = torch.randn(model_output.shape , dtype=model_output.dtype , generator=UpperCamelCase ).to(UpperCamelCase )
_a = self._get_variance(UpperCamelCase , UpperCamelCase ) ** 0.5 * eta * noise
_a = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=UpperCamelCase , pred_original_sample=UpperCamelCase )
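# --- illustrative sketch (not part of the original file) ---
# The deterministic (eta = 0) core of the DDIM update above, reduced to plain
# arithmetic so formulas (12) and (16) of the DDIM paper can be read in
# isolation. `alpha_prod_t` / `alpha_prod_t_prev` stand in for the cumulative
# alpha products the scheduler looks up; the function name is illustrative.
def ddim_update_eta0(sample, model_output, alpha_prod_t, alpha_prod_t_prev):
    beta_prod_t = 1 - alpha_prod_t
    # "predicted x_0", formula (12)
    pred_original = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
    # "direction pointing to x_t" (sigma_t = 0 because eta = 0)
    direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
    return alpha_prod_t_prev**0.5 * pred_original + direction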
def snake_case_ (self : Any , UpperCamelCase : torch.FloatTensor , UpperCamelCase : int , UpperCamelCase : torch.FloatTensor , UpperCamelCase : str="epsilon" , UpperCamelCase : Dict=None , UpperCamelCase : bool = True , ):
'''simple docstring'''
_a = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
_a , _a = torch.split(UpperCamelCase , sample.shape[1] , dim=1 )
else:
_a = None
# 1. compute alphas, betas
_a = self.alphas_cumprod[t]
_a = self.alphas_cumprod[t - 1] if t > 0 else self.one
_a = 1 - alpha_prod_t
_a = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
_a = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
_a = model_output
else:
raise ValueError(f'Unsupported prediction_type {prediction_type}.' )
# 3. Clip "predicted x_0"
_a = self.bit_scale
if self.config.clip_sample:
_a = torch.clamp(UpperCamelCase , -scale , UpperCamelCase )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_a = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
_a = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
_a = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
_a = 0
if t > 0:
_a = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=UpperCamelCase ).to(model_output.device )
_a = (self._get_variance(UpperCamelCase , predicted_variance=UpperCamelCase ) ** 0.5) * noise
_a = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=UpperCamelCase , pred_original_sample=UpperCamelCase )
class A ( _a ):
def __init__( self : Any , lowerCAmelCase_ : UNetaDConditionModel , lowerCAmelCase_ : Union[DDIMScheduler, DDPMScheduler] , lowerCAmelCase_ : Optional[float] = 1.0 , ) -> int:
"""simple docstring"""
super().__init__()
_a = bit_scale
_a = (
ddim_bit_scheduler_step if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
@torch.no_grad()
def __call__( self : List[Any] , lowerCAmelCase_ : Optional[int] = 2_56 , lowerCAmelCase_ : Optional[int] = 2_56 , lowerCAmelCase_ : Optional[int] = 50 , lowerCAmelCase_ : Optional[torch.Generator] = None , lowerCAmelCase_ : Optional[int] = 1 , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , **lowerCAmelCase_ : Any , ) -> Union[Tuple, ImagePipelineOutput]:
"""simple docstring"""
_a = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=lowerCAmelCase_ , )
_a = decimal_to_bits(lowerCAmelCase_ ) * self.bit_scale
_a = latents.to(self.device )
self.scheduler.set_timesteps(lowerCAmelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
_a = self.unet(lowerCAmelCase_ , lowerCAmelCase_ ).sample
# compute the previous noisy sample x_t -> x_t-1
_a = self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).prev_sample
_a = bits_to_decimal(lowerCAmelCase_ )
if output_type == "pil":
_a = self.numpy_to_pil(lowerCAmelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase_ )
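# --- illustrative sketch (not part of the original file) ---
# One way to wire the pieces together, assuming the obfuscated class `A` above
# corresponds to the community BitDiffusion pipeline with the signature
# (unet, scheduler, bit_scale). The checkpoint path below is hypothetical, and
# arguments are passed positionally because the parameter names here are
# obfuscated.
def build_bit_diffusion_pipeline(checkpoint: str = "path/to/bit-diffusion-unet"):
    unet = UNetaDConditionModel.from_pretrained(checkpoint)  # obfuscated UNet2DConditionModel
    scheduler = DDIMScheduler()  # a DDPMScheduler() would select the DDPM step path instead
    return A(unet, scheduler, 1.0)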
| 22
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__UpperCamelCase : List[str] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
def __init__( self : Any ,*lowercase_ : Dict ,**lowercase_ : List[str] ):
warnings.warn(
'''The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ImageGPTImageProcessor instead.''' ,lowercase_ ,)
super().__init__(*lowercase_ ,**lowercase_ )
| 450
| 0
|
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Dict ,_UpperCAmelCase : str ) -> Optional[int]:
assert isinstance(_UpperCAmelCase ,_UpperCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" ,[False, True] )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : Any ) -> Optional[int]:
_a : List[Any] =tmp_path / """cache"""
_a : str ={"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_a : Dict =TextDatasetReader(_UpperCAmelCase ,cache_dir=_UpperCAmelCase ,keep_in_memory=_UpperCAmelCase ).read()
_check_text_dataset(_UpperCAmelCase ,_UpperCAmelCase )
@pytest.mark.parametrize(
"""features""" ,[
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] ,)
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Any ,_UpperCAmelCase : Optional[Any] ) -> Tuple:
_a : Any =tmp_path / """cache"""
_a : List[str] ={"""text""": """string"""}
_a : Optional[int] =features.copy() if features else default_expected_features
_a : Any =(
Features({feature: Value(_UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
_a : str =TextDatasetReader(_UpperCAmelCase ,features=_UpperCAmelCase ,cache_dir=_UpperCAmelCase ).read()
_check_text_dataset(_UpperCAmelCase ,_UpperCAmelCase )
@pytest.mark.parametrize("""split""" ,[None, NamedSplit("""train""" ), """train""", """test"""] )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Any ) -> Tuple:
_a : str =tmp_path / """cache"""
_a : Any ={"""text""": """string"""}
_a : Dict =TextDatasetReader(_UpperCAmelCase ,cache_dir=_UpperCAmelCase ,split=_UpperCAmelCase ).read()
_check_text_dataset(_UpperCAmelCase ,_UpperCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" ,[str, list] )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : Any ,_UpperCAmelCase : Optional[int] ) -> int:
if issubclass(_UpperCAmelCase ,_UpperCAmelCase ):
_a : List[str] =text_path
elif issubclass(_UpperCAmelCase ,_UpperCAmelCase ):
_a : str =[text_path]
_a : int =tmp_path / """cache"""
_a : Union[str, Any] ={"""text""": """string"""}
_a : Any =TextDatasetReader(_UpperCAmelCase ,cache_dir=_UpperCAmelCase ).read()
_check_text_dataset(_UpperCAmelCase ,_UpperCAmelCase )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : Dict=("train",) ) -> List[Any]:
assert isinstance(_UpperCAmelCase ,_UpperCAmelCase )
for split in splits:
_a : Any =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" ,[False, True] )
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : Any ,_UpperCAmelCase : List[str] ) -> Any:
_a : Dict =tmp_path / """cache"""
_a : int ={"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_a : str =TextDatasetReader({"""train""": text_path} ,cache_dir=_UpperCAmelCase ,keep_in_memory=_UpperCAmelCase ).read()
_check_text_datasetdict(_UpperCAmelCase ,_UpperCAmelCase )
@pytest.mark.parametrize(
"""features""" ,[
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] ,)
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[int] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : Optional[Any] ) -> Optional[int]:
_a : Tuple =tmp_path / """cache"""
    # the single "text" column defaults to the "string" dtype
_a : int ={"""text""": """string"""}
_a : Optional[int] =features.copy() if features else default_expected_features
_a : Optional[Any] =(
Features({feature: Value(_UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
_a : Any =TextDatasetReader({"""train""": text_path} ,features=_UpperCAmelCase ,cache_dir=_UpperCAmelCase ).read()
_check_text_datasetdict(_UpperCAmelCase ,_UpperCAmelCase )
@pytest.mark.parametrize("""split""" ,[None, NamedSplit("""train""" ), """train""", """test"""] )
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
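# A minimal usage sketch (the file path below is a hypothetical example, not part
# of the test fixtures):
#
#     dataset = TextDatasetReader("data.txt", cache_dir="/tmp/cache").read()
#     assert dataset.column_names == ["text"]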
| 715
|
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class A__ ( SchedulerCommonTest ):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config( self , **kwargs ):
        '''simple docstring'''
        config = {
            "num_train_timesteps": 1_0_0_0,
            "beta_start": 0.0_001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf" ),
            "variance_type": None,
        }
        config.update(**kwargs )
        return config
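    # A minimal usage sketch (illustrative only): self.get_scheduler_config(solver_order=3)
    # returns the defaults above with "solver_order" overridden, which is how the tests
    # below probe individual configuration knobs.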
    def check_over_configs( self , time_step=0 , **config ):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output , new_output = sample, sample
            for t in range(time_step , time_step + scheduler.config.solver_order + 1 ):
                output = scheduler.step(residual , t , output , **kwargs ).prev_sample
                new_output = new_scheduler.step(residual , t , new_output , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __UpperCAmelCase ( self :Optional[Any] ) -> List[str]:
'''simple docstring'''
pass
    def check_over_forward( self , time_step=0 , **forward_kwargs ):
        '''simple docstring'''
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop( self , scheduler=None , **config ):
        '''simple docstring'''
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 1_0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
    def test_full_uneven_loop( self ):
        '''simple docstring'''
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
        num_inference_steps = 5_0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:] ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_574 ) < 1e-3
    def test_timesteps( self ):
        '''simple docstring'''
        for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_switch( self ):
        '''simple docstring'''
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_791 ) < 1e-3
        scheduler = DEISMultistepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_791 ) < 1e-3
    def test_thresholding( self ):
        '''simple docstring'''
        self.check_over_configs(thresholding=False )
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , algorithm_type="dpmsolver++" , solver_order=order , solver_type=solver_type , )
    def test_prediction_type( self ):
        '''simple docstring'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_solver_order_and_type( self ):
        '''simple docstring'''
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        sample = self.full_loop(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        assert not torch.isnan(sample ).any(), "Samples have nan numbers"
    def test_lower_order_final( self ):
        '''simple docstring'''
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
    def test_lambda_min_clipped( self ):
'''simple docstring'''
self.check_over_configs(lambda_min_clipped=-float("""inf""" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
    def test_variance_type( self ):
        '''simple docstring'''
        self.check_over_configs(variance_type=None )
self.check_over_configs(variance_type="""learned_range""" )
    def test_inference_steps( self ):
        '''simple docstring'''
        for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )
    def test_full_loop_no_noise( self ):
        '''simple docstring'''
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_791 ) < 1e-3
    def test_full_loop_with_karras( self ):
        '''simple docstring'''
        sample = self.full_loop(use_karras_sigmas=True )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_248 ) < 1e-3
    def test_full_loop_with_v_prediction( self ):
        '''simple docstring'''
        sample = self.full_loop(prediction_type="v_prediction" )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.1_453 ) < 1e-3
    def test_full_loop_with_karras_and_v_prediction( self ):
        '''simple docstring'''
        sample = self.full_loop(prediction_type="v_prediction" , use_karras_sigmas=True )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.0_649 ) < 1e-3
    def test_fp16_support( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 1_0
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        assert sample.dtype == torch.float16
| 506
| 0
|
from __future__ import annotations
def lowercase_ ( __snake_case : list[list[int]] ) -> bool:
    '''simple docstring'''
    size = len(__snake_case )
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size )] for _ in range(size )]
    solved = run_maze(__snake_case , 0 , 0 , solutions )
    if solved:
        print("\n".join(str(row ) for row in solutions ) )
    else:
        print("No solution exists!" )
    return solved
def run_maze ( maze : list[list[int]] , i : int , j : int , solutions : list[list[int]] ) -> bool:
    '''simple docstring'''
    size = len(maze )
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze , i + 1 , j , solutions )
                or run_maze(maze , i , j + 1 , solutions )
                or run_maze(maze , i - 1 , j , solutions )
                or run_maze(maze , i , j - 1 , solutions )
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
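    # A minimal demo (the grid is an assumption for illustration): 0 = open cell, 1 = wall.
    # lowercase_([[0, 1], [0, 0]]) prints the solved path matrix and returns True.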
| 241
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class _snake_case ( PretrainedConfig ):
    model_type = "camembert"
    def __init__( self ,vocab_size=30_522 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3_072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1E-12 ,pad_token_id=1 ,bos_token_id=0 ,eos_token_id=2 ,position_embedding_type="absolute" ,use_cache=True ,classifier_dropout=None ,**kwargs ,) -> None:
        super().__init__(pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
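# A minimal usage sketch: the defaults above reproduce the camembert-base
# architecture, and keyword overrides change individual hyperparameters, e.g.
# `_snake_case(num_hidden_layers=6)` for a smaller encoder.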
class _snake_case ( OnnxConfig ):
    @property
    def inputs ( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 241
| 1
|
"""simple docstring"""
def miller_rabin ( n : int , allow_probable : bool = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 1_0 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime." )
# array bounds provided by analysis
    bounds = [
2_0_4_7,
1_3_7_3_6_5_3,
2_5_3_2_6_0_0_1,
3_2_1_5_0_3_1_7_5_1,
2_1_5_2_3_0_2_8_9_8_7_4_7,
3_4_7_4_7_4_9_6_6_0_3_8_3,
3_4_1_5_5_0_0_7_1_7_2_8_3_2_1,
1,
3_8_2_5_1_2_3_0_5_6_5_4_6_4_1_3_0_5_1,
1,
1,
3_1_8_6_6_5_8_5_7_8_3_4_0_3_1_1_5_1_1_6_7_4_6_1,
3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1,
]
    primes = [2, 3, 5, 7, 1_1, 1_3, 1_7, 1_9, 2_3, 2_9, 3_1, 3_7, 4_1]
    for idx, _p in enumerate(bounds , 1 ):
if n < _p:
# then we have our last prime to check
            plist = primes[:idx]
break
    d , s = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
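    # e.g. for n = 561: n - 1 = 560 = 35 * 2**4, so d = 35 and s = 4 at this point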
for prime in plist:
        pr = False
for r in range(SCREAMING_SNAKE_CASE ):
            m = pow(prime , d * 2**r , n )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def test_miller_rabin ( ):
'''simple docstring'''
assert not miller_rabin(5_6_1 )
assert miller_rabin(5_6_3 )
# 2047
assert not miller_rabin(8_3_8_2_0_1 )
assert miller_rabin(8_3_8_2_0_7 )
# 1_373_653
assert not miller_rabin(1_7_3_1_6_0_0_1 )
assert miller_rabin(1_7_3_1_6_0_1_7 )
# 25_326_001
assert not miller_rabin(3_0_7_8_3_8_6_6_4_1 )
assert miller_rabin(3_0_7_8_3_8_6_6_5_3 )
# 3_215_031_751
assert not miller_rabin(1_7_1_3_0_4_5_5_7_4_8_0_1 )
assert miller_rabin(1_7_1_3_0_4_5_5_7_4_8_1_9 )
# 2_152_302_898_747
assert not miller_rabin(2_7_7_9_7_9_9_7_2_8_3_0_7 )
assert miller_rabin(2_7_7_9_7_9_9_7_2_8_3_2_7 )
# 3_474_749_660_383
assert not miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_4_4_1 )
assert miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_5_2_7 )
# 341_550_071_728_321
assert not miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_5_1 )
assert miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_9_1 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_8_6_7 )
assert miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_9_5_1 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_3_3 )
assert miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_5_9 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 681
|
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester :
    """simple docstring"""
    def __init__( self , parent , out_indices=None , out_features=None , stage_names=None , backbone="resnet50" , batch_size=3 , image_size=32 , num_channels=3 , is_training=True , use_pretrained_backbone=True , ):
        """simple docstring"""
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
        return config, pixel_values
    def get_config( self ):
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
    def create_and_check_model( self , config , pixel_values ):
        """simple docstring"""
        model = TimmBackbone(config=config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            result = model(pixel_values )
        self.parent.assertEqual(
            result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class SCREAMING_SNAKE_CASE__ ( ModelTesterMixin , BackboneTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TimmBackboneModelTester(self )
        self.config_tester = ConfigTester(self , config_class=TimmBackboneConfig , has_text_modality=False )
    def test_config( self ):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence( self ):
        """simple docstring"""
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True )
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint )
        self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
        self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
        self.assertEqual(timm_model.channels , transformers_model.channels )
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices , (-1,) )
        self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True , out_indices=[1, 2, 3] )
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint , out_indices=[1, 2, 3] )
        self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
        self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
        self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("TimmBackbone doesn't support feed forward chunking" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone initialization is managed on the timm side" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't support output_attentions." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Safetensors is not supported by timm." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase__ ( self ):
"""simple docstring"""
pass
    def test_forward_signature( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_retain_grad_hidden_states_attentions( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config )
        model.to(torch_device )
        inputs = self._prepare_for_class(inputs_dict , model_class )
        outputs = model(**inputs )
        output = outputs[0][-1]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True )
        self.assertIsNotNone(hidden_states.grad )
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad )
    def lowercase__ ( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
            self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
            self.assertEqual(len(model.channels ) , len(config.out_indices ) )
            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config )
            modified_config.out_indices = None
            model = model_class(modified_config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
            self.assertEqual(len(result.feature_maps ) , 1 )
            self.assertEqual(len(model.channels ) , 1 )
            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config )
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
| 681
| 1
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class __lowercase ( ProcessorMixin ):
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__( self , feature_extractor , tokenizer ):
        '''simple docstring'''
        super().__init__(feature_extractor , tokenizer )
    def __call__( self , *args , **kwargs ):
        '''simple docstring'''
        audio = kwargs.pop("audio" , None )
        text = kwargs.pop("text" , None )
        text_target = kwargs.pop("text_target" , None )
        audio_target = kwargs.pop("audio_target" , None )
        sampling_rate = kwargs.pop("sampling_rate" , None )
        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?" )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?" )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process." )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        elif text is not None:
            inputs = self.tokenizer(text , **kwargs )
        else:
            inputs = None
        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target , *args , sampling_rate=sampling_rate , **kwargs )
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target , **kwargs )
            labels = targets["input_ids"]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask" )
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
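    # A minimal usage sketch (the variable names are assumptions for illustration):
    #
    #     batch = processor(audio=waveform, sampling_rate=16_000)   # feature-extractor path
    #     batch = processor(text="hello world")                     # tokenizer path
    #
    # Exactly one of audio/text and one of audio_target/text_target may be given per
    # call, as enforced by the checks above.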
    def pad( self , *args , **kwargs ):
        '''simple docstring'''
        input_values = kwargs.pop("input_values" , None )
        input_ids = kwargs.pop("input_ids" , None )
        labels = kwargs.pop("labels" , None )
        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs." )
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded." )
        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values , *args , **kwargs )
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids , **kwargs )
        else:
            inputs = None
        if labels is not None:
            if "input_ids" in labels or (isinstance(labels , list ) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels , **kwargs )
                labels = targets["input_ids"]
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels , *args , **kwargs )
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask" )
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
| 65
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        image_processor_map = {
            '''do_resize''': True,
            '''size''': 20,
            '''do_center_crop''': True,
            '''crop_size''': 18,
            '''do_normalize''': True,
            '''image_mean''': [0.48145466, 0.4578275, 0.40821073],
            '''image_std''': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self , **kwargs ):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default( self ):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = AlignProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        processor_fast = AlignProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , BertTokenizer )
        self.assertIsInstance(processor_fast.tokenizer , BertTokenizerFast )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , EfficientNetImageProcessor )
        self.assertIsInstance(processor_fast.image_processor , EfficientNetImageProcessor )
    def test_save_load_pretrained_additional_features( self ):
        processor = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = AlignProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , BertTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , EfficientNetImageProcessor )
    def test_image_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input , return_tensors='''np''' )
        input_processor = processor(images=image_input , return_tensors='''np''' )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str , padding='''max_length''' , max_length=64 )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 224
| 0
|
"""simple docstring"""
def alternative_string_arrange ( first_str: str , second_str: str ) -> str:
    first_str_length : int = len(first_str )
    second_str_length : int = len(second_str )
    abs_length : int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list : list = []
    for char_count in range(abs_length ):
        if char_count < first_str_length:
            output_list.append(first_str[char_count] )
        if char_count < second_str_length:
            output_list.append(second_str[char_count] )
    return "".join(output_list )
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
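    # Expected output for the demo above: "AXBYZ" (characters are interleaved until
    # the shorter string runs out, then the remainder of the longer one follows).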
| 703
|
"""simple docstring"""
from math import pi, sqrt, tan
def surface_area_cube ( side_length: float ) -> float:
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def surface_area_cuboid ( length: float , breadth: float , height: float ) -> float:
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere ( radius: float ) -> float:
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def surface_area_hemisphere ( radius: float ) -> float:
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def surface_area_cone ( radius: float , height: float ) -> float:
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum ( radius_1: float , radius_2: float , height: float ) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            'surface_area_conical_frustum() only accepts non-negative values' )
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder ( radius: float , height: float ) -> float:
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def surface_area_torus ( torus_radius: float , tube_radius: float ) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError('surface_area_torus() only accepts non-negative values' )
    if torus_radius < tube_radius:
        raise ValueError(
            'surface_area_torus() does not support spindle or self intersecting tori' )
    return 4 * pow(pi , 2 ) * torus_radius * tube_radius
def area_rectangle ( length: float , width: float ) -> float:
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def area_square ( side_length: float ) -> float:
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def area_triangle ( base: float , height: float ) -> float:
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def area_triangle_three_sides ( side1: float , side2: float , side3: float ) -> float:
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError('Given three sides do not form a triangle' )
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3) )
    return area
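# Worked example: a 3-4-5 right triangle has semi-perimeter 6, so Heron's formula
# gives sqrt(6 * 3 * 2 * 1) = 6.0, matching base * height / 2 = 3 * 4 / 2.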
def area_parallelogram ( base: float , height: float ) -> float:
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def area_trapezium ( base1: float , base2: float , height: float ) -> float:
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError('area_trapezium() only accepts non-negative values' )
    return 1 / 2 * (base1 + base2) * height
def area_circle ( radius: float ) -> float:
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def area_ellipse ( radius_x: float , radius_y: float ) -> float:
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def area_rhombus ( diagonal_1: float , diagonal_2: float ) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('area_rhombus() only accepts non-negative values' )
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon ( sides: int , length: float ) -> float:
    if not isinstance(sides , int ) or sides < 3:
        raise ValueError(
            'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
    elif length < 0:
        raise ValueError(
            'area_reg_polygon() only accepts non-negative values as \
length of a side' )
    return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 696
| 0
|
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType('''DataClass''', Any)
DataClassType = NewType('''DataClassType''', Any)
def string_to_bool ( v ):
    if isinstance(v , bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            F'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' )
def make_choice_type_function ( choices: list ):
    str_to_choice = {str(choice ): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg , arg )
def HfArg ( *,
    aliases: Union[str, List[str]] = None , help: str = None , default: Any = dataclasses.MISSING , default_factory: Callable[[], Any] = dataclasses.MISSING , metadata: dict = None , **kwargs , ):
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata , default=default , default_factory=default_factory , **kwargs )
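# A minimal usage sketch (the dataclass below is a hypothetical example, not part
# of this module):
#
#     @dataclasses.dataclass
#     class TrainingArgs:
#         learning_rate: float = HfArg(default=1e-4, help="Optimizer step size", aliases=["--lr"])
#
#     parser = a__(TrainingArgs)
#     (args,) = parser.parse_args_into_dataclasses(["--lr", "3e-5"])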
class a__ ( ArgumentParser ):
    dataclass_types: Iterable[DataClassType]
    def __init__( self , dataclass_types: Union[DataClassType, Iterable[DataClassType]] , **kwargs ) -> Any:
        """simple docstring"""
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs )
        if dataclasses.is_dataclass(dataclass_types ):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types )
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype )
    @staticmethod
    def _parse_dataclass_field ( parser: ArgumentParser , field: dataclasses.Field ) -> Any:
        """simple docstring"""
        field_name = F'--{field.name}'
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type , str ):
            raise RuntimeError(
                '''Unresolved type detected, which should have been done with the help of '''
                '''`typing.get_type_hints` method by default''')
        aliases = kwargs.pop('''aliases''' , [] )
        if isinstance(aliases , str ):
            aliases = [aliases]
        origin_type = getattr(field.type , '''__origin__''' , field.type )
        if origin_type is Union or (hasattr(types , '''UnionType''' ) and isinstance(field.type , types.UnionType )):
            if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(None ) not in field.type.__args__
            ):
                raise ValueError(
                    '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
                    ''' the argument parser only supports one type per argument.'''
                    F' Problem encountered in field \'{field.name}\'.')
            if type(None ) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type , '''__origin__''' , field.type )
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None , field.type.__args__[1] ) else field.type.__args__[1]
                )
                origin_type = getattr(field.type , '''__origin__''' , field.type )
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type , type ) and issubclass(field.type , Enum )):
            if origin_type is Literal:
                kwargs['''choices'''] = field.type.__args__
            else:
                kwargs['''choices'''] = [x.value for x in field.type]
            kwargs['''type'''] = make_choice_type_function(kwargs['''choices'''] )
            if field.default is not dataclasses.MISSING:
                kwargs['''default'''] = field.default
            else:
                kwargs['''required'''] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs )
            # Hack because type=bool in argparse does not behave as we want.
            kwargs['''type'''] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs['''default'''] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs['''nargs'''] = '''?'''
                # This is the value that will get picked if we do --field_name (without value)
                kwargs['''const'''] = True
        elif isclass(origin_type ) and issubclass(origin_type , list ):
            kwargs['''type'''] = field.type.__args__[0]
            kwargs['''nargs'''] = '''+'''
            if field.default_factory is not dataclasses.MISSING:
                kwargs['''default'''] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs['''required'''] = True
        else:
            kwargs['''type'''] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs['''default'''] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs['''default'''] = field.default_factory()
            else:
                kwargs['''required'''] = True
        parser.add_argument(field_name , *aliases , **kwargs )
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs['''default'''] = False
            parser.add_argument(F'--no_{field.name}' , action='''store_false''' , dest=field.name , **bool_kwargs )
    def _add_dataclass_arguments ( self , dtype: DataClassType ) -> Dict:
        """simple docstring"""
        if hasattr(dtype , '''_argument_group_name'''):
            parser = self.add_argument_group(dtype._argument_group_name )
        else:
            parser = self
        try:
            type_hints: Dict[str, type] = get_type_hints(dtype )
        except NameError:
            raise RuntimeError(
                F'Type resolution failed for {dtype}. Try declaring the class in global scope or '
                '''removing line of `from __future__ import annotations` which opts in Postponed '''
                '''Evaluation of Annotations (PEP 563)''')
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex ):
                python_version = '''.'''.join(map(str , sys.version_info[:3] ) )
                raise RuntimeError(
                    F'Type resolution failed for {dtype} on Python {python_version}. Try removing '
                    '''line of `from __future__ import annotations` which opts in union types as '''
                    '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
                    '''support Python versions that lower than 3.10, you need to use '''
                    '''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
                    '''`X | None`.''') from ex
            raise
        for field in dataclasses.fields(dtype ):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser , field )
    def parse_args_into_dataclasses ( self , args=None , return_remaining_strings=False , look_for_args_file=True , args_filename=None , args_file_flag=None , ) -> Tuple[DataClass, ...]:
        """simple docstring"""
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename ) )
            elif look_for_args_file and len(sys.argv ):
                args_files.append(Path(sys.argv[0] ).with_suffix('''.args'''))
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag , type=str , action='''append''')
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg , args = args_file_parser.parse_known_args(args=args )
                cmd_args_file_paths = vars(cfg ).get(args_file_flag.lstrip('''-''') , None )
                if cmd_args_file_paths:
                    args_files.extend([Path(p ) for p in cmd_args_file_paths] )
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace , remaining_args = self.parse_known_args(args=args )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in vars(namespace ).items() if k in keys}
            for k in keys:
                delattr(namespace , k )
            obj = dtype(**inputs )
            outputs.append(obj )
        if len(namespace.__dict__ ) > 0:
            # additional namespace.
            outputs.append(namespace )
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(F'Some specified arguments are not used by the HfArgumentParser: {remaining_args}')
            return (*outputs,)
    def parse_dict ( self , args: Dict[str, Any] , allow_extra_keys: bool = False ) -> Tuple[DataClass, ...]:
        """simple docstring"""
        unused_keys = set(args.keys() )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            obj = dtype(**inputs )
            outputs.append(obj )
        if not allow_extra_keys and unused_keys:
            raise ValueError(F'Some keys are not used by the HfArgumentParser: {sorted(unused_keys )}')
        return tuple(outputs )
    def parse_json_file ( self , json_file: str , allow_extra_keys: bool = False ) -> Tuple[DataClass, ...]:
        """simple docstring"""
        with open(Path(json_file ) , encoding='''utf-8''') as open_json_file:
            data = json.loads(open_json_file.read() )
        outputs = self.parse_dict(data , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
    def parse_yaml_file ( self , yaml_file: str , allow_extra_keys: bool = False ) -> Tuple[DataClass, ...]:
        """simple docstring"""
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file ).read_text() ) , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
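# The JSON/YAML entry points above are thin wrappers around parse_dict: a config
# file such as {"learning_rate": 3e-5} (a hypothetical example) round-trips through
# parse_dict, so unknown keys raise unless allow_extra_keys=True.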
| 227
|
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling ( num: int , den: int ) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list ( digit_len: int ) -> list[str]:
    solutions = []
    den = 11
    last_digit = int('''1''' + '''0''' * digit_len )
    for num in range(den , last_digit ):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num , den ):
                    solutions.append(F'{num}/{den}')
            den += 1
        num += 1
        den = 10
    return solutions
def UpperCAmelCase ( snake_case : int = 2 ):
_lowerCAmelCase:Optional[int] = 1.0
for fraction in fraction_list(snake_case ):
_lowerCAmelCase:Any = Fraction(snake_case )
result *= frac.denominator / frac.numerator
return int(snake_case )
if __name__ == "__main__":
print(solution())
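# Quick sanity check for the search above. The four non-trivial digit-cancelling
# fractions are known to be 16/64, 19/95, 26/65 and 49/98; their product reduces to
# 1/100, so solution() returns the denominator 100.
assert is_digit_cancelling(49, 98)  # 49/98 == 4/8 after cancelling the shared 9
assert not is_digit_cancelling(12, 24)  # 1/4 != 12/24, so no cancellation
assert sorted(fraction_list(2)) == sorted(["16/64", "19/95", "26/65", "49/98"])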
| 227
| 1
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
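# A usage sketch for the public API exported above; the CSV path is hypothetical.
from datasets import load_dataset

dataset = load_dataset("csv", data_files="data/train.csv", split="train")
print(dataset[0])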
| 82
|
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)
BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class BertAbsConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "bertabs"

    def __init__(self, vocab_size=30522, max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2, **kwargs) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
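# A short usage sketch: instantiate the config above with its defaults, overriding one
# encoder setting; nothing here touches model weights.
config = BertAbsConfig(enc_layers=8)
print(config.model_type, config.enc_layers, config.dec_hidden_size)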
| 82
| 1
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name: str):
    iam_client = boto3.client("iam")

    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name, PolicyName=f"{role_name}_policy_permission", PolicyDocument=json.dumps(policy_document, indent=2)
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")
def _get_iam_role_arn(role_name: str):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch your training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id
        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )
        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )

    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want to use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )
    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
| 506
|
"""simple docstring"""
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
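# A small example of the hashing behaviour above: comment-only lines and blank lines
# are dropped before hashing, so these two snippets produce the same digest.
a = _hash_python_lines(["x = 1", "# a comment", "y = 2"])
b = _hash_python_lines(["x = 1", "", "y = 2"])
assert a == b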
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
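# An example of how the lookup tables above are consumed: inferring the builder module
# and its default kwargs for a data file from its extension.
module_name, default_builder_kwargs = _EXTENSION_TO_MODULE[".tsv"]
assert module_name == "csv" and default_builder_kwargs == {"sep": "\t"}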
| 506
| 1
|
'''simple docstring'''
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(increment: int, total_number: int, level: int, current_list: list[int], total_list: list[list[int]]) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
print_all_state(total_list)
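# A worked check of the backtracking above: for n=4, k=2 it yields all C(4,2) = 6
# combinations in lexicographic order.
assert generate_all_combinations(4, 2) == [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]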
| 701
|
'''simple docstring'''
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 124
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
"google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
"google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
"google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    """simple docstring"""

    model_type = "mobilenet_v2"

    def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0, depth_divisible_by=8, min_depth=8, expand_ratio=6, output_stride=32, first_layer_is_expansion=True, finegrained_output=True, hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.8, initializer_range=0.02, layer_norm_eps=0.001, semantic_loss_ignore_index=255, **kwargs) -> None:
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
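# A usage sketch: build the model config and inspect the ONNX export metadata declared
# above; no model weights are instantiated here.
config = MobileNetV2Config(depth_multiplier=1.4)
onnx_config = MobileNetV2OnnxConfig(config)
print(list(onnx_config.inputs.keys()), onnx_config.atol_for_validation)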
| 591
|
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    """simple docstring"""
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12

        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    """simple docstring"""
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
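# One concrete example of the key mapping above: an original checkpoint key and its
# renamed counterpart (layer norm renamed first, then the encoder prefix).
assert rename_key("visual.transformer.resblocks.0.ln_1.weight") == "vision_model.encoder.layers.0.layer_norm1.weight"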
def convert_state_dict(orig_state_dict, config):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video(num_frames):
    """simple docstring"""
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset",
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """simple docstring"""
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)
# Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
# kinetics-400
if model_name == "xclip-base-patch32":
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] )
elif model_name == "xclip-base-patch16":
__SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] )
elif model_name == "xclip-large-patch14":
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
__SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__SCREAMING_SNAKE_CASE : str = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__SCREAMING_SNAKE_CASE : int = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__SCREAMING_SNAKE_CASE : Dict = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
__SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
assert torch.allclose(snake_case , snake_case , atol=1E-3 )
print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 74
| 0
|
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs):
        '''simple docstring'''
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs,
        )

    def read(self):
        '''simple docstring'''
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
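# A usage sketch for the reader above: load a hypothetical plain-text file into a
# Dataset, one example per line of the input file.
reader = TextDatasetReader("data/corpus.txt")
dataset = reader.read()
print(dataset)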
| 708
|
'''simple docstring'''
def split(string: str, separator: str = " ") -> list:
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
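# Example behaviour of the function above: it matches str.split for single-character
# separators, except that a trailing separator does not produce a trailing empty string.
assert split("apple#banana#cherry", "#") == ["apple", "banana", "cherry"]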
| 506
| 0
|
'''simple docstring'''
def is_pentagonal(n: int) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(f'''{solution() = }''')
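# Quick check of the pentagonal test above: P4 = 22 and P7 = 70 are pentagonal, and
# their sum 92 = P8 is pentagonal too; their difference 48 is not, so this pair fails
# the search condition used in solution().
assert is_pentagonal(22) and is_pentagonal(70) and is_pentagonal(92)
assert not is_pentagonal(48)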
| 502
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_longformer''': [
'''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LongformerConfig''',
'''LongformerOnnxConfig''',
],
'''tokenization_longformer''': ['''LongformerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
'''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongformerForMaskedLM''',
'''LongformerForMultipleChoice''',
'''LongformerForQuestionAnswering''',
'''LongformerForSequenceClassification''',
'''LongformerForTokenClassification''',
'''LongformerModel''',
'''LongformerPreTrainedModel''',
'''LongformerSelfAttention''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
'''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLongformerForMaskedLM''',
'''TFLongformerForMultipleChoice''',
'''TFLongformerForQuestionAnswering''',
'''TFLongformerForSequenceClassification''',
'''TFLongformerForTokenClassification''',
'''TFLongformerModel''',
'''TFLongformerPreTrainedModel''',
'''TFLongformerSelfAttention''',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 502
| 1
|
def power(base: int, exponent: int) -> float:
    return base * power(base, exponent - 1) if exponent else 1
if __name__ == "__main__":
print("""Raise base to the power of exponent using recursion...""")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
print(F"""{base} to the power of {exponent} is {result}""")
| 717
|
from math import ceil
def solution(n: int = 1001) -> int:
    total = 1

    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even

    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 446
| 0
|
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1


@dataclass
class A:
    """simple docstring"""

    x: int
    y: str
class PyUtilsTest(TestCase):
    """simple docstring"""

    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)

        sn = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True).items()}, {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False, num_proc=num_proc), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True, num_proc=num_proc).items()}, {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn, num_proc=num_proc)

    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)

    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
'iterable_length, num_proc, expected_num_proc' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    """simple docstring"""

    @require_tf
    def test_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize('input_data' , [{}])
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
'data, expected_output' , [
({}, []),
([], []),
('foo', ['foo']),
(['foo', 'bar'], ['foo', 'bar']),
([['foo', 'bar']], ['foo', 'bar']),
([[['foo'], ['bar']]], ['foo', 'bar']),
([[['foo'], 'bar']], ['foo', 'bar']),
({'a': 1, 'b': 2}, [1, 2]),
({'a': [1, 2], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[[3], [4]]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, [4]]}, [1, 2, 3, 4]),
({'a': {'1': 1}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)


def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should see each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
| 23
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")
class Node(Generic[T]):
    '''simple docstring'''

    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class Stack(Generic[T]):
    '''simple docstring'''

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self):
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
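# A short usage sketch of the linked-list stack above, showing LIFO behaviour.
stack = Stack[int]()
stack.push(1)
stack.push(2)
assert str(stack) == "2->1"
assert stack.pop() == 2 and stack.peek() == 1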
| 84
| 0
|
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    """simple docstring"""

    def __init__(self, parent, out_indices=None, stage_names=None, out_features=None, backbone="resnet50", batch_size=3, image_size=32, num_channels=3, use_pretrained_backbone=True, is_training=True):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_maps[-1].shape, (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self ):
        timm_checkpoint = '''resnet18'''
        transformers_checkpoint = '''microsoft/resnet-18'''
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True )
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True , out_indices=[1, 2, 3] )
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def _snake_case (self ):
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def _snake_case (self ):
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def _snake_case (self ):
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def _snake_case (self ):
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def _snake_case (self ):
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def _snake_case (self ):
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _snake_case (self ):
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def _snake_case (self ):
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def _snake_case (self ):
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _snake_case (self ):
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _snake_case (self ):
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def _snake_case (self ):
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def _snake_case (self ):
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def _snake_case (self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _snake_case (self ):
pass
    def test_forward_signature(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_retain_grad_hidden_states_attentions(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config )
        model.to(torch_device )
        inputs = self._prepare_for_class(inputs_dict , model_class )
        outputs = model(**inputs )
        output = outputs[0][-1]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True )
        self.assertIsNotNone(hidden_states.grad )
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad )
    def test_create_from_modified_config(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
            self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
            self.assertEqual(len(model.channels ) , len(config.out_indices ) )
            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config )
            modified_config.out_indices = None
            model = model_class(modified_config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
            self.assertEqual(len(result.feature_maps ) , 1 )
            self.assertEqual(len(model.channels ) , 1 )
            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config )
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
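# --- Usage sketch (illustrative addition, not part of the original test file) ---
# Mirrors the timm/transformers comparison exercised above; assumes torch and
# timm are installed. The checkpoint name and out_indices come from the test itself.
#
#   from transformers import AutoBackbone
#   backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=(1, 2, 3))
#   print(backbone.out_indices, backbone.channels)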
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : Dict = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : Any = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : str = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : Tuple = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : List[Any] = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : int = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : int = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
def __magic_name__( *lowerCamelCase, **lowerCamelCase):
requires_backends(lowerCamelCase, ['''torch'''])
def __magic_name__( *lowerCamelCase, **lowerCamelCase):
requires_backends(lowerCamelCase, ['''torch'''])
def __magic_name__( *lowerCamelCase, **lowerCamelCase):
requires_backends(lowerCamelCase, ['''torch'''])
def __magic_name__( *lowerCamelCase, **lowerCamelCase):
requires_backends(lowerCamelCase, ['''torch'''])
def __magic_name__( *lowerCamelCase, **lowerCamelCase):
requires_backends(lowerCamelCase, ['''torch'''])
def __magic_name__( *lowerCamelCase, **lowerCamelCase):
requires_backends(lowerCamelCase, ['''torch'''])
def __magic_name__( *lowerCamelCase, **lowerCamelCase):
requires_backends(lowerCamelCase, ['''torch'''])
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : List[str] = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : List[Any] = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : Dict = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : List[Any] = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : List[str] = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : Dict = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : int = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : List[Any] = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : str = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : int = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : Dict = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : Tuple = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : Dict = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : int = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : List[str] = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : List[str] = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : str = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : Tuple = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : List[Any] = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : Dict = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : Any = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : List[Any] = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : List[str] = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : int = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : Dict = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : List[Any] = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : Dict = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : Any = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : List[str] = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : Dict = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
class a__ ( metaclass=__A ):
"""simple docstring"""
__UpperCamelCase : Dict = ['torch']
def __init__(self , *__lowercase , **__lowercase ):
requires_backends(self , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
@classmethod
def _snake_case (cls , *__lowercase , **__lowercase ):
requires_backends(cls , ['''torch'''] )
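# --- Illustrative note (not part of the original file) ---
# Each class above is a placeholder: the DummyObject metaclass routes any
# instantiation or classmethod call through requires_backends, which raises an
# ImportError when torch is absent. A hypothetical sketch of the effect:
#
#   model = SomeTorchOnlyModel()   # hypothetical dummy class name; without torch
#                                  # installed this raises ImportError ("requires torch")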
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__A = logging.get_logger(__name__)
class _A ( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ['pixel_values']
    def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BICUBIC , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , do_convert_rgb : bool = True , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"""height""": 384, """width""": 384}
        size = get_size_dict(size , default_to_square=True )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=True )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
        output_size = (size["""height"""], size["""width"""])
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def rescale( self , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images : ImageInput , do_resize : Optional[bool] = None , size : Optional[Dict[str, int]] = None , resample : PILImageResampling = None , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_normalize : Optional[bool] = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , do_convert_rgb : Optional[bool] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=True )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        encoded_outputs = BatchFeature(data={"""pixel_values""": images} , tensor_type=return_tensors )
        return encoded_outputs
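# --- Usage sketch (illustrative addition, not part of the original file) ---
# `_A` is the (obfuscated) image processor class defined above; this assumes
# the vision extras of transformers are installed.
if __name__ == "__main__":
    processor = _A()  # defaults: resize to 384x384, rescale by 1/255, CLIP normalization
    fake_image = (np.random.rand(480, 640, 3) * 255).astype("uint8")
    batch = processor(images=fake_image, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected: (1, 3, 384, 384)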
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of ``func`` (an expression in ``x``) starting from point ``a``."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # Find value of e (log here is the natural logarithm, so log(x) - 1 = 0 gives x = e)
print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
# Exponential Roots
print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
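# --- Illustrative addition (not part of the original file) ---
# The same update rule, x <- x - f(x) / f'(x), written without string eval(),
# for f(x) = x**2 - 2 whose positive root is sqrt(2):
def _newton_sqrt_of_two(x: float = 1.0, precision: float = 10**-10) -> float:
    while abs(x * x - 2) >= precision:
        x -= (x * x - 2) / (2 * x)
    return x
# _newton_sqrt_of_two() converges to 1.4142135623... within a few iterations.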
import math


class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning (closest) weight vector by Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Update the winning vector towards the sample."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
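# Worked single step (illustrative, not part of the original file): with
# alpha = 0.5, sample = [1, 1, 0, 0] and winner row [0.2, 0.6, 0.5, 0.9],
# w += alpha * (sample - w) yields [0.6, 0.8, 0.25, 0.45].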
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"""files""" ,[
["""full:README.md""", """dataset_infos.json"""],
["""empty:README.md""", """dataset_infos.json"""],
["""dataset_infos.json"""],
["""full:README.md"""],
] ,)
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ):
lowercase = tmp_path_factory.mktemp("""dset_infos_dir""" )
if "full:README.md" in files:
with open(dataset_infos_dir / """README.md""" ,"""w""" ) as f:
f.write("""---\ndataset_info:\n dataset_size: 42\n---""" )
if "empty:README.md" in files:
with open(dataset_infos_dir / """README.md""" ,"""w""" ) as f:
f.write("""""" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / """dataset_infos.json""" ,"""w""" ) as f:
f.write("""{\"default\": {\"dataset_size\": 42}}""" )
lowercase = DatasetInfosDict.from_directory(lowerCAmelCase__ )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"""dataset_info""" ,[
DatasetInfo(),
DatasetInfo(
description="""foo""" ,features=Features({"""a""": Value("""int32""" )} ) ,builder_name="""builder""" ,config_name="""config""" ,version="""1.0.0""" ,splits=[{"""name""": """train"""}] ,download_size=42 ,),
] ,)
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ):
lowercase = str(lowerCAmelCase__ )
dataset_info.write_to_directory(lowerCAmelCase__ )
lowercase = DatasetInfo.from_directory(lowerCAmelCase__ )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(lowerCAmelCase__ ,"""dataset_info.json""" ) )
def UpperCamelCase__ ( ):
lowercase = DatasetInfo(
description="""foo""" ,citation="""bar""" ,homepage="""https://foo.bar""" ,license="""CC0""" ,features=Features({"""a""": Value("""int32""" )} ) ,post_processed={} ,supervised_keys=() ,task_templates=[] ,builder_name="""builder""" ,config_name="""config""" ,version="""1.0.0""" ,splits=[{"""name""": """train""", """num_examples""": 42}] ,download_checksums={} ,download_size=1_337 ,post_processing_size=442 ,dataset_size=1_234 ,size_in_bytes=1_337 + 442 + 1_234 ,)
lowercase = dataset_info._to_yaml_dict()
assert sorted(lowerCAmelCase__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] ,(list, dict, int, str) )
lowercase = yaml.safe_dump(lowerCAmelCase__ )
lowercase = yaml.safe_load(lowerCAmelCase__ )
assert dataset_info_yaml_dict == reloaded
def UpperCamelCase__ ( ):
lowercase = DatasetInfo()
lowercase = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"""dataset_infos_dict""" ,[
DatasetInfosDict(),
DatasetInfosDict({"""default""": DatasetInfo()} ),
DatasetInfosDict({"""my_config_name""": DatasetInfo()} ),
DatasetInfosDict(
{
"""default""": DatasetInfo(
description="""foo""" ,features=Features({"""a""": Value("""int32""" )} ) ,builder_name="""builder""" ,config_name="""config""" ,version="""1.0.0""" ,splits=[{"""name""": """train"""}] ,download_size=42 ,)
} ),
DatasetInfosDict(
{
"""v1""": DatasetInfo(dataset_size=42 ),
"""v2""": DatasetInfo(dataset_size=1_337 ),
} ),
] ,)
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ):
lowercase = str(lowerCAmelCase__ )
dataset_infos_dict.write_to_directory(lowerCAmelCase__ )
lowercase = DatasetInfosDict.from_directory(lowerCAmelCase__ )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
lowercase = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
lowercase = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(lowerCAmelCase__ ,"""README.md""" ) )
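# Quick manual check (illustrative, not part of the original test file):
#
#   info = DatasetInfo(dataset_size=123)
#   yaml_dict = info._to_yaml_dict()  # keeps only DatasetInfo._INCLUDED_INFO_IN_YAML keys
#   assert yaml_dict["dataset_size"] == 123  # description/citation are dropped, as tested above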
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/config.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/config.json'''
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''fnet'''
    def __init__( self , vocab_size=32000 , hidden_size=768 , num_hidden_layers=12 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=4 , initializer_range=0.02 , layer_norm_eps=1E-12 , use_tpu_fourier_optimizations=False , tpu_short_seq_length=512 , pad_token_id=3 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
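# --- Usage sketch (illustrative addition, not part of the original file) ---
#
#   config = FNetConfig(num_hidden_layers=4, hidden_size=256)
#   config.save_pretrained("./tiny-fnet")                # writes config.json
#   reloaded = FNetConfig.from_pretrained("./tiny-fnet")
#   assert reloaded.num_hidden_layers == 4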
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class BlipaVisionConfig( PretrainedConfig ):
    model_type = "blip_2_vision_model"
    def __init__(self , hidden_size=1408 , intermediate_size=6144 , num_hidden_layers=39 , num_attention_heads=16 , image_size=224 , patch_size=14 , hidden_act="gelu" , layer_norm_eps=0.00001 , attention_dropout=0.0 , initializer_range=1e-10 , qkv_bias=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path , **kwargs ):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from BlipaConfig
        if config_dict.get("""model_type""" ) == "blip-2":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class BlipaQFormerConfig( PretrainedConfig ):
    model_type = "blip_2_qformer"
    def __init__(self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , cross_attention_frequency=2 , encoder_hidden_size=1408 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
    @classmethod
    def from_pretrained(cls , pretrained_model_name_or_path , **kwargs ):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the qformer config dict if we are loading from BlipaConfig
        if config_dict.get("""model_type""" ) == "blip-2":
            config_dict = config_dict["""qformer_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class BlipaConfig( PretrainedConfig ):
    model_type = "blip-2"
    is_composition = True
    def __init__(self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=32 , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. initializing the Blip2VisionConfig with default values.""" )
        if qformer_config is None:
            qformer_config = {}
            logger.info("""qformer_config is None. Initializing the Blip2QFormerConfig with default values.""" )
        if text_config is None:
            text_config = {}
            logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
        self.vision_config = BlipaVisionConfig(**vision_config )
        self.qformer_config = BlipaQFormerConfig(**qformer_config )
        text_model_type = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
    @classmethod
    def from_vision_qformer_text_configs(cls , vision_config , qformer_config , text_config , **kwargs , ):
        '''simple docstring'''
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )
    def to_dict(self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""qformer_config"""] = self.qformer_config.to_dict()
        output["""text_config"""] = self.text_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
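# --- Usage sketch (illustrative addition, not part of the original file) ---
# Composing the three sub-configs, mirroring `from_vision_qformer_text_configs`:
#
#   vision = BlipaVisionConfig(num_hidden_layers=2)
#   qformer = BlipaQFormerConfig(num_hidden_layers=2)
#   config = BlipaConfig(vision_config=vision.to_dict(), qformer_config=qformer.to_dict())
#   assert config.qformer_config.encoder_hidden_size == config.vision_config.hidden_size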
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        '''callback''',
        '''latents''',
        '''callback_steps''',
        '''output_type''',
        '''num_images_per_prompt''',
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
A_ : Tuple = False
A_ : int = False
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNetaDModel(
            block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=True , use_timestep_embedding=False , time_embedding_type="""fourier""" , mid_block_type="""UNetMidBlock1D""" , down_block_types=("""DownBlock1DNoSkip""", """DownBlock1D""", """AttnDownBlock1D""") , up_block_types=("""AttnUpBlock1D""", """UpBlock1D""", """UpBlock1DNoSkip""") , )
        scheduler = IPNDMScheduler()
        components = {
            """unet""": unet,
            """scheduler""": scheduler,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """batch_size""": 1,
            """generator""": generator,
            """num_inference_steps""": 4,
        }
        return inputs
    def test_dance_diffusion( self ):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = pipe(**inputs )
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def lowerCAmelCase_ ( self : int ) -> List[Any]:
return super().test_save_load_local()
@skip_mps
def lowerCAmelCase_ ( self : Tuple ) -> List[Any]:
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def lowerCAmelCase_ ( self : Optional[Any] ) -> List[Any]:
return super().test_save_load_optional_components()
@skip_mps
def lowerCAmelCase_ ( self : int ) -> Dict:
return super().test_attention_slicing_forward_pass()
def lowerCAmelCase_ ( self : Optional[int] ) -> Any:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dance_diffusion( self ):
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        output = pipe(generator=generator , num_inference_steps=100 , audio_length_in_s=4.096 )
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
    def test_dance_diffusion_fp16( self ):
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" , torch_dtype=torch.float16 )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        output = pipe(generator=generator , num_inference_steps=100 , audio_length_in_s=4.096 )
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester( unittest.TestCase ):
def __init__( self : Optional[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[str, Any]=7 , lowerCamelCase_ : Optional[Any]=3 , lowerCamelCase_ : int=30 , lowerCamelCase_ : Union[str, Any]=4_00 , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : int=None , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : List[str]=[0.5, 0.5, 0.5] , lowerCamelCase_ : Optional[Any]=[0.5, 0.5, 0.5] , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : List[Any]=1 / 2_55 , lowerCamelCase_ : int=True , ) -> str:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__a = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 13_33}
__a = parent
__a = batch_size
__a = num_channels
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size
__a = do_normalize
__a = image_mean
__a = image_std
__a = do_rescale
__a = rescale_factor
__a = do_pad
def lowerCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCAmelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : int=False ) -> List[str]:
if not batched:
__a = image_inputs[0]
if isinstance(lowerCamelCase_ , Image.Image ):
__a , __a = image.size
else:
__a , __a = image.shape[1], image.shape[2]
if w < h:
__a = int(self.size["""shortest_edge"""] * h / w )
__a = self.size["""shortest_edge"""]
elif w > h:
__a = self.size["""shortest_edge"""]
__a = int(self.size["""shortest_edge"""] * w / h )
else:
__a = self.size["""shortest_edge"""]
__a = self.size["""shortest_edge"""]
else:
__a = []
for image in image_inputs:
__a , __a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__a = max(lowerCamelCase_ , key=lambda lowerCamelCase_ : item[0] )[0]
__a = max(lowerCamelCase_ , key=lambda lowerCamelCase_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class a ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self : Dict ) -> Tuple:
__a = ConditionalDetrImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self : Any ) -> List[str]:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self : Optional[Any] ) -> Union[str, Any]:
__a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) )
def lowerCAmelCase_ ( self : Optional[int] ) -> List[str]:
__a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 13_33} )
self.assertEqual(image_processor.do_pad , lowerCamelCase_ )
__a = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCamelCase_ )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , lowerCamelCase_ )
def lowerCAmelCase_ ( self : Tuple ) -> Tuple:
pass
def lowerCAmelCase_ ( self : Optional[Any] ) -> List[Any]:
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , Image.Image )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a = self.image_processor_tester.get_expected_values(lowerCamelCase_ , batched=lowerCamelCase_ )
__a = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
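
    # For orientation, a minimal sketch of one entry in the COCO "annotations" list
    # consumed above (illustrative values; the real fixture file carries the full
    # record, including "segmentation" and "id" fields):
    #
    #   {"image_id": 39769, "category_id": 75, "iscrowd": 0,
    #    "bbox": [x_min, y_min, width, height], "area": 5887.96}
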
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
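
# Note: the "boxes" verified above are in normalized (center_x, center_y, width, height)
# format, as used throughout the DETR family, so every coordinate lies in [0, 1].
#
# A sketch of how this suite would typically be run; the file path is an assumption
# about where the test lives in the transformers repository:
#
#   RUN_SLOW=1 python -m pytest tests/models/conditional_detr/test_image_processing_conditional_detr.py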