| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 to 53.2k chars) | int64 (0 to 721) | string (91 to 41.9k chars) | int64 (0 to 699) | int64 (0 to 1) |
import argparse

import torch
from datasets import load_dataset
from donut import DonutModel

from transformers import (
    DonutImageProcessor,
    DonutProcessor,
    DonutSwinConfig,
    DonutSwinModel,
    MBartConfig,
    MBartForCausalLM,
    VisionEncoderDecoderModel,
    XLMRobertaTokenizerFast,
)


def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config


def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # split fused qkv weights into the query/key/value slots of the HF DonutSwin layout
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "<s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="naver-clova-ix/donut-base-finetuned-docvqa",
        required=False,
        type=str,
        help="Name of the original model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        required=False,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub.",
    )

    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
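For context, a minimal usage sketch (not part of the conversion script above) of how a checkpoint converted this way could answer a DocVQA question; the nielsr/donut-base-finetuned-docvqa repo id is assumed from the push_to_hub branch.

# Hypothetical usage sketch; assumes the converted checkpoint was pushed
# as nielsr/donut-base-finetuned-docvqa by the script above.
from datasets import load_dataset
from transformers import DonutProcessor, VisionEncoderDecoderModel

processor = DonutProcessor.from_pretrained("nielsr/donut-base-finetuned-docvqa")
model = VisionEncoderDecoderModel.from_pretrained("nielsr/donut-base-finetuned-docvqa")

image = load_dataset("hf-internal-testing/example-documents")["test"][0]["image"].convert("RGB")
pixel_values = processor(image, return_tensors="pt").pixel_values
task_prompt = "<s_docvqa><s_question>When is the coffee break?</s_question><s_answer>"
decoder_input_ids = processor.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt").input_ids

outputs = model.generate(pixel_values, decoder_input_ids=decoder_input_ids, max_length=512)
print(processor.batch_decode(outputs)[0])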
code_codestyle: 518
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J/(mol*K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return the root-mean-square speed of a gas molecule, in m/s:
    v_rms = sqrt(3RT / M), with temperature in kelvin and molar mass in kg/mol."""
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example
    temperature = 300
    molar_mass = 0.028  # kg/mol (nitrogen, N2, is 28 g/mol; the formula expects kg/mol)
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
style_context_codestyle: 518 | label: 1
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__ : Dict = {
"""configuration_informer""": [
"""INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = [
"""INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InformerForPrediction""",
"""InformerModel""",
"""InformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
a__ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
code_codestyle: 309
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
a__ : Dict = logging.getLogger(__name__)
def A__ ( ):
"""simple docstring"""
_lowerCAmelCase = argparse.ArgumentParser(
description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' )
parser.add_argument(
'--dataset_name', type=__lowerCamelCase, default='wikitext', help='Name of the training. Explore datasets at: hf.co/datasets.', )
parser.add_argument(
'--dataset_config', type=__lowerCamelCase, default='wikitext-103-raw-v1', help='Configuration name of the dataset.' )
parser.add_argument(
'--tokenizer_name_or_path', type=__lowerCamelCase, default='sayakpaul/unigram-tokenizer-wikitext', help='Tokenizer identifier. Can be a local filepath or a Hub identifier.', )
parser.add_argument(
'--shard_size', type=__lowerCamelCase, default=1_0_0_0, help='Number of entries to go in a single shard.', )
parser.add_argument('--split', type=__lowerCamelCase, default='train', choices=['train', 'test', 'validation'] )
parser.add_argument(
'--limit', default=__lowerCamelCase, type=__lowerCamelCase, help='Limit the number of shards (used for debugging).', )
parser.add_argument(
'--max_length', type=__lowerCamelCase, default=5_1_2, help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
' sequence length that is a multiple of 8.', )
parser.add_argument(
'--output_dir', default='tf-tpu', type=__lowerCamelCase, help='Output directory where the TFRecord shards will be saved. If the'
' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
' shards will be directly saved to a Google Cloud Storage bucket.', )
_lowerCAmelCase = parser.parse_args()
return args
def A__ ( __lowerCamelCase ):
"""simple docstring"""
def fn(__lowerCamelCase ):
return tokenizer(examples['text'] )
return fn
def A__ ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = []
for i in range(len(tokenized_data['input_ids'] ) ):
_lowerCAmelCase = {
'input_ids': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['input_ids'][i] ) ),
'attention_mask': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['attention_mask'][i] ) ),
}
_lowerCAmelCase = tf.train.Features(feature=__lowerCamelCase )
_lowerCAmelCase = tf.train.Example(features=__lowerCamelCase )
_lowerCAmelCase = example.SerializeToString()
records.append(__lowerCamelCase )
return records
def A__ ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split )
if args.limit is not None:
_lowerCAmelCase = min(len(__lowerCamelCase ), args.limit )
_lowerCAmelCase = dataset.select(range(__lowerCamelCase ) )
print(F'''Limiting the dataset to {args.limit} entries.''' )
_lowerCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
_lowerCAmelCase = os.path.join(args.output_dir, args.split )
if not os.path.exists(__lowerCamelCase ):
os.makedirs(__lowerCamelCase )
else:
_lowerCAmelCase = os.path.join(args.output_dir, args.split )
# Tokenize the whole dataset at once.
_lowerCAmelCase = tokenize_function(__lowerCamelCase )
_lowerCAmelCase = dataset.map(__lowerCamelCase, batched=__lowerCamelCase, num_proc=4, remove_columns=['text'] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(__lowerCamelCase ):
# Concatenate all texts.
_lowerCAmelCase = {k: sum(examples[k], [] ) for k in examples.keys()}
_lowerCAmelCase = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
_lowerCAmelCase = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
_lowerCAmelCase = {
k: [t[i : i + args.max_length] for i in range(0, __lowerCamelCase, args.max_length )]
for k, t in concatenated_examples.items()
}
return result
_lowerCAmelCase = dataset_tokenized.map(__lowerCamelCase, batched=__lowerCamelCase, batch_size=1_0_0_0, num_proc=4 )
_lowerCAmelCase = 0
_lowerCAmelCase = 0
for shard in range(0, len(__lowerCamelCase ), args.shard_size ):
_lowerCAmelCase = grouped_dataset[shard : shard + args.shard_size]
_lowerCAmelCase = len(dataset_snapshot['input_ids'] )
_lowerCAmelCase = os.path.join(__lowerCamelCase, F'''dataset-{shard_count}-{records_containing}.tfrecord''' )
_lowerCAmelCase = get_serialized_examples(__lowerCamelCase )
with tf.io.TFRecordWriter(__lowerCamelCase ) as out_file:
for i in range(len(__lowerCamelCase ) ):
_lowerCAmelCase = serialized_examples[i]
out_file.write(__lowerCamelCase )
print('Wrote file {} containing {} records'.format(__lowerCamelCase, __lowerCamelCase ) )
shard_count += 1
total_records += records_containing
with open(F'''split-{args.split}-records-count.txt''', 'w' ) as f:
print(F'''Total {args.split} records: {total_records}''', file=__lowerCamelCase )
if __name__ == "__main__":
a__ : str = parse_args()
main(args)
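As a sanity check, here is a small sketch (not part of the original script) of reading one shard back with tf.data; the feature names match the serialization above, and the shard filename is a placeholder following the naming scheme.

# Sketch: parse one of the shards written above (filename is a placeholder).
import tensorflow as tf

feature_description = {
    "input_ids": tf.io.VarLenFeature(tf.int64),
    "attention_mask": tf.io.VarLenFeature(tf.int64),
}

def parse_example(serialized):
    parsed = tf.io.parse_single_example(serialized, feature_description)
    return {k: tf.sparse.to_dense(v) for k, v in parsed.items()}

ds = tf.data.TFRecordDataset("tf-tpu/train/dataset-0-1000.tfrecord").map(parse_example)
for example in ds.take(1):
    print(example["input_ids"].shape)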
style_context_codestyle: 309 | label: 1
"""simple docstring"""
from collections import namedtuple
import requests
from lxml import html # type: ignore
_A = namedtuple("covid_data", "cases deaths recovered")
def lowercase (_snake_case = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
'''simple docstring'''
__UpperCamelCase = "//div[@class = \"maincounter-number\"]/span/text()"
return covid_data(*html.fromstring(requests.get(_snake_case ).content ).xpath(_snake_case ) )
_A = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
code_codestyle: 505
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
_A = logging.get_logger(__name__)
_A = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all MVP models at https://huggingface.co/models?filter=mvp
_A = {
"vocab_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
},
"added_tokens.json": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
},
"merges_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
},
"tokenizer_file": {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
},
}
_A = {
"RUCAIBox/mvp": 1_024,
}
class __UpperCAmelCase ( snake_case__ ):
"""simple docstring"""
_snake_case : Dict = VOCAB_FILES_NAMES
_snake_case : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Tuple = ['input_ids', 'attention_mask']
_snake_case : Any = MvpTokenizer
def __init__( self : str , A_ : int=None , A_ : List[Any]=None , A_ : Optional[Any]=None , A_ : int="replace" , A_ : int="<s>" , A_ : Any="</s>" , A_ : List[str]="</s>" , A_ : Optional[int]="<s>" , A_ : Optional[int]="<unk>" , A_ : Optional[int]="<pad>" , A_ : Union[str, Any]="<mask>" , A_ : str=False , A_ : List[str]=True , **A_ : Union[str, Any] , )-> Any:
super().__init__(
A_ , A_ , tokenizer_file=A_ , errors=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , cls_token=A_ , unk_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , trim_offsets=A_ , **A_ , )
__UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , A_ ) != add_prefix_space:
__UpperCamelCase = getattr(A_ , pre_tok_state.pop("type" ) )
__UpperCamelCase = add_prefix_space
__UpperCamelCase = pre_tok_class(**A_ )
__UpperCamelCase = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__UpperCamelCase = "post_processor"
__UpperCamelCase = getattr(self.backend_tokenizer , A_ , A_ )
if tokenizer_component_instance:
__UpperCamelCase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__UpperCamelCase = tuple(state["sep"] )
if "cls" in state:
__UpperCamelCase = tuple(state["cls"] )
__UpperCamelCase = False
if state.get("add_prefix_space" , A_ ) != add_prefix_space:
__UpperCamelCase = add_prefix_space
__UpperCamelCase = True
if state.get("trim_offsets" , A_ ) != trim_offsets:
__UpperCamelCase = trim_offsets
__UpperCamelCase = True
if changes_to_apply:
__UpperCamelCase = getattr(A_ , state.pop("type" ) )
__UpperCamelCase = component_class(**A_ )
setattr(self.backend_tokenizer , A_ , A_ )
@property
def A ( self : List[str] )-> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def A ( self : Any , A_ : List[Any] )-> List[Any]:
__UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else value
__UpperCamelCase = value
def A ( self : str , *A_ : Dict , **A_ : Dict )-> BatchEncoding:
__UpperCamelCase = kwargs.get("is_split_into_words" , A_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*A_ , **A_ )
def A ( self : Tuple , *A_ : str , **A_ : List[str] )-> BatchEncoding:
__UpperCamelCase = kwargs.get("is_split_into_words" , A_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._encode_plus(*A_ , **A_ )
def A ( self : Optional[int] , A_ : str , A_ : Optional[str] = None )-> Tuple[str]:
__UpperCamelCase = self._tokenizer.model.save(A_ , name=A_ )
return tuple(A_ )
def A ( self : Any , A_ : Dict , A_ : Dict=None )-> Union[str, Any]:
__UpperCamelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A ( self : Optional[int] , A_ : List[int] , A_ : Optional[List[int]] = None )-> List[int]:
__UpperCamelCase = [self.sep_token_id]
__UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
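A brief usage sketch (assuming the class above is exposed as transformers.MvpTokenizerFast, as the imports suggest):

# Sketch: typical round trip with the fast MVP tokenizer defined above.
from transformers import MvpTokenizerFast

tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
encoding = tokenizer("Hello world", return_tensors="pt")
print(encoding.input_ids)
print(tokenizer.decode(encoding.input_ids[0]))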
style_context_codestyle: 505 | label: 1
import argparse
import json
import os

from tensorflow.core.protobuf.saved_model_pb2 import SavedModel


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
    "Assert",
    "AssignVariableOp",
    "EmptyTensorList",
    "MergeV2Checkpoints",
    "ReadVariableOp",
    "ResourceGather",
    "RestoreV2",
    "SaveV2",
    "ShardedFilename",
    "StatefulPartitionedCall",
    "StaticRegexFullMatch",
    "VarHandleOp",
]


def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        # join the list so it can be concatenated into the message
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
    parser.add_argument(
        "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
    )
    parser.add_argument(
        "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
    )
    parser.add_argument(
        "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
    )
    args = parser.parse_args()

    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
code_codestyle: 701
class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Add a vertex to the graph if it is not already present."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Add an undirected weighted edge between head and tail."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Bump duplicate weights so every edge weight is distinct."""
        edges = self.get_edges()
        # each edge appears twice (once per direction); drop the mirror copies
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Compute the minimum spanning tree of `graph` with Boruvka's algorithm."""
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)

                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
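A short sketch of driving the class above on a toy graph; the edge list is illustrative only.

# Illustrative run of Boruvka's algorithm on a made-up weighted graph.
g = Graph.build(vertices=[0, 1, 2, 3], edges=[(0, 1, 1), (0, 2, 2), (2, 3, 1), (1, 3, 3)])
g.distinct_weight()  # the MST routine above assumes distinct edge weights
mst = Graph.boruvka_mst(g)
print(mst)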
style_context_codestyle: 205 | label: 0
from __future__ import annotations

from math import ceil, floor, sqrt


def solution(target: int = 2_000_000) -> int:
    """Project Euler 85: find the area of the grid that contains a number of
    rectangles as close as possible to `target`."""
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


if __name__ == "__main__":
    print(f"{solution() = }")
code_codestyle: 483
"""simple docstring"""
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : int , lowercase_ : Dict[str, int] , lowercase_ : List[str] , lowercase_ : int = None , lowercase_ : int = None):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_ : List[Any] = pad_token_id
SCREAMING_SNAKE_CASE_ : Any = max_length
SCREAMING_SNAKE_CASE_ : List[str] = vocab
SCREAMING_SNAKE_CASE_ : Any = merges
SCREAMING_SNAKE_CASE_ : List[str] = BytePairTokenizer(lowercase_ , lowercase_ , sequence_length=lowercase_)
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict , lowercase_ : GPTaTokenizer , *lowercase_ : Dict , **lowercase_ : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [''' '''.join(lowercase_) for m in tokenizer.bpe_ranks.keys()]
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer.get_vocab()
return cls(lowercase_ , lowercase_ , *lowercase_ , **lowercase_)
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , lowercase_ : Union[str, os.PathLike] , *lowercase_ : str , **lowercase_ : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = GPTaTokenizer.from_pretrained(lowercase_ , *lowercase_ , **lowercase_)
return cls.from_tokenizer(lowercase_ , *lowercase_ , **lowercase_)
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] , lowercase_ : int):
'''simple docstring'''
return cls(**lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : List[Any] , lowercase_ : int = None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.tf_tokenizer(lowercase_)
SCREAMING_SNAKE_CASE_ : List[str] = tf.ones_like(lowercase_)
if self.pad_token_id is not None:
# pad the tokens up to max length
SCREAMING_SNAKE_CASE_ : List[Any] = max_length if max_length is not None else self.max_length
if max_length is not None:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = pad_model_inputs(
lowercase_ , max_seq_length=lowercase_ , pad_value=self.pad_token_id)
return {"attention_mask": attention_mask, "input_ids": input_ids}
style_context_codestyle: 512 | label: 0
from __future__ import annotations


class Matrix:
    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            # Laplace expansion along the first row
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError("Row must be equal in length to the other rows in the matrix")
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError("Column must be a list containing all ints and/or floats")
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError("Column must be equal in length to the other columns in the matrix")
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix([[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError("A Matrix can only be multiplied by an int, float, or another matrix")

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError("Only invertable matrices can be raised to a negative power")
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
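A small worked example of the class above; the expected values follow from det([[2, 1], [1, 1]]) = 1.

# Worked example for the Matrix class above.
m = Matrix([[2, 1], [1, 1]])
print(m.determinant())                  # 1
print(m.inverse().rows)                 # [[1, -1], [-1, 2]]
print(m * m.inverse() == m.identity())  # True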
code_codestyle: 531
import argparse
import pickle

import numpy as np
import torch
from torch import nn

from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging


logging.set_verbosity_info()


def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
style_context_codestyle: 531 | label: 1
import argparse
from typing import Dict

import tensorflow as tf
import torch
from tqdm import tqdm

from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration


INIT_COMMON = [
    # tf -> hf
    ("/", "."),
    ("layer_", "layers."),
    ("kernel", "weight"),
    ("beta", "bias"),
    ("gamma", "weight"),
    ("pegasus", "model"),
]
END_COMMON = [
    (".output.dense", ".fc2"),
    ("intermediate.LayerNorm", "final_layer_norm"),
    ("intermediate.dense", "fc1"),
]

DECODER_PATTERNS = (
    INIT_COMMON
    + [
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.out_proj"),
        ("attention.self", "self_attn"),
        ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
        ("attention.encdec_output.dense", "encoder_attn.out_proj"),
        ("attention.encdec", "encoder_attn"),
        ("key", "k_proj"),
        ("value", "v_proj"),
        ("query", "q_proj"),
        ("decoder.LayerNorm", "decoder.layernorm_embedding"),
    ]
    + END_COMMON
)

REMAINING_PATTERNS = (
    INIT_COMMON
    + [
        ("embeddings.word_embeddings", "shared.weight"),
        ("embeddings.position_embeddings", "embed_positions.weight"),
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.output"),
        ("attention.self", "self_attn.self"),
        ("encoder.LayerNorm", "encoder.layernorm_embedding"),
    ]
    + END_COMMON
)

KEYS_TO_IGNORE = [
    "encdec/key/bias",
    "encdec/query/bias",
    "encdec/value/bias",
    "self/key/bias",
    "self/query/bias",
    "self/value/bias",
    "encdec_output/dense/bias",
    "attention/output/dense/bias",
]


def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k


def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
code_codestyle: 15
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Return the smallest positive number evenly divisible by all of 1..n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f"{solution() = }")
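A quick cross-check of the helpers above against the standard library (math.lcm exists from Python 3.9):

# Sketch: verify solution() against math.lcm; LCM of 1..10 is 2520.
from functools import reduce
from math import lcm as math_lcm

assert solution(10) == reduce(math_lcm, range(1, 11)) == 2520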
style_context_codestyle: 643 | label: 0
"""simple docstring"""
def A__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
return int((input_a, input_a).count(1 ) != 0 )
def A__ ( ):
'''simple docstring'''
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
code_codestyle: 704
"""simple docstring"""
import os
import string
import sys
lowerCamelCase : Any = 1 << 8
lowerCamelCase : Optional[int] = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 2_7,
"""up""": 6_5 + ARROW_KEY_FLAG,
"""down""": 6_6 + ARROW_KEY_FLAG,
"""right""": 6_7 + ARROW_KEY_FLAG,
"""left""": 6_8 + ARROW_KEY_FLAG,
"""mod_int""": 9_1,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 5_0,
"""delete""": 5_1,
"""pg_up""": 5_3,
"""pg_down""": 5_4,
}
lowerCamelCase : str = KEYMAP["""up"""]
lowerCamelCase : List[str] = KEYMAP["""left"""]
if sys.platform == "win32":
lowerCamelCase : Dict = []
lowerCamelCase : Optional[int] = {
b"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
b"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
b"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
b"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
b"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(1_0):
lowerCamelCase : Tuple = ord(str(i))
def A__ ( ):
'''simple docstring'''
if os.name == "nt":
import msvcrt
_SCREAMING_SNAKE_CASE = '''mbcs'''
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(UpperCamelCase__ ) == 0:
# Read the keystroke
_SCREAMING_SNAKE_CASE = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
_SCREAMING_SNAKE_CASE = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
_SCREAMING_SNAKE_CASE = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP['''mod_int'''] ) )
WIN_CH_BUFFER.append(UpperCamelCase__ )
if ord(UpperCamelCase__ ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
_SCREAMING_SNAKE_CASE = chr(KEYMAP['''esc'''] )
except KeyError:
_SCREAMING_SNAKE_CASE = cha[1]
else:
_SCREAMING_SNAKE_CASE = ch.decode(UpperCamelCase__ )
else:
_SCREAMING_SNAKE_CASE = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
_SCREAMING_SNAKE_CASE = sys.stdin.fileno()
_SCREAMING_SNAKE_CASE = termios.tcgetattr(UpperCamelCase__ )
try:
tty.setraw(UpperCamelCase__ )
_SCREAMING_SNAKE_CASE = sys.stdin.read(1 )
finally:
termios.tcsetattr(UpperCamelCase__ , termios.TCSADRAIN , UpperCamelCase__ )
return ch
def A__ ( ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(UpperCamelCase__ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(UpperCamelCase__ ) == KEYMAP["esc"]:
_SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(UpperCamelCase__ ) == KEYMAP["mod_int"]:
_SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(UpperCamelCase__ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(UpperCamelCase__ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(UpperCamelCase__ ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
style_context_codestyle: 168 | label: 0
TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])

IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])

IMAGE_VARIATION_PARAMS = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])

TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])

TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])

IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])

IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])

CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])

CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])

UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])

UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])

TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])

TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
code_codestyle: 658
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory

from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
    convert,
    ensure_valid_input,
    generate_identified_filename,
    infer_shapes,
    quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow


class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)
# Assert all variables are present
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , SCREAMING_SNAKE_CASE_ )
self.assertSequenceEqual(variable_names[3:] , SCREAMING_SNAKE_CASE_ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: '''batch''', 1: '''sequence'''} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['''output_0'''] , {0: '''batch''', 1: '''sequence'''} )
self.assertDictEqual(shapes['''output_1'''] , {0: '''batch'''} )
def UpperCAmelCase ( self ) -> int:
UpperCamelCase :int = ['''input_ids''', '''attention_mask''', '''token_type_ids''']
UpperCamelCase :Tuple = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]}
UpperCamelCase , UpperCamelCase :Any = ensure_valid_input(FuncContiguousArgs() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(SCREAMING_SNAKE_CASE_ ) , set(SCREAMING_SNAKE_CASE_ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(SCREAMING_SNAKE_CASE_ , (tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
UpperCamelCase , UpperCamelCase :Tuple = ensure_valid_input(FuncNonContiguousArgs() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens['''input_ids'''] )
self.assertEqual(ordered_input_names[0] , '''input_ids''' )
def UpperCAmelCase ( self ) -> Union[str, Any]:
        generated = generate_identified_filename(Path('''/home/something/my_fake_model.onnx''') , '''-test''')
        self.assertEqual('''/home/something/my_fake_model-test.onnx''' , generated.as_posix())
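# A self-contained sketch (illustrative, not the transformers implementation) of the idea
# behind ensure_valid_input tested above: keep the tokenizer outputs that match the
# forward() signature, in signature order, stopping at the first parameter the tokenizer
# did not produce.
import inspect
def reorder_for_forward(forward_fn, tokens):
    ordered_names, ordered_values = [], []
    for name in inspect.signature(forward_fn).parameters:
        if name not in tokens:  # provided args must be contiguous from the left
            break
        ordered_names.append(name)
        ordered_values.append(tokens[name])
    return ordered_names, tuple(ordered_values)
# e.g. reorder_for_forward(FuncContiguousArgs().forward,
#                          {"input_ids": [1], "attention_mask": [0], "token_type_ids": [1]})
# returns (["input_ids", "token_type_ids", "attention_mask"], ([1], [1], [0]))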
| 658
| 1
|
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    """Cosine similarity between two batches of embeddings (rows are L2-normalised)."""
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)
        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )
        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))
    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01
        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)
        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule
    def __init__(self, config, input_shape=None, seed=0, dtype=jnp.float32, _do_init=True, **kwargs):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
    def init_weights(self, rng, input_shape, params=None):
        input_pixels = jax.random.normal(rng, input_shape)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        random_params = self.module.init(rngs, input_pixels)["params"]
        return random_params
    def __call__(self, clip_input, params=None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))
        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
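# A tiny usage sketch (illustrative values, not part of the original module): both inputs
# are row-normalised inside jax_cosine_distance, so the result is a cosine-similarity matrix.
# >>> a = jnp.array([[1.0, 0.0], [0.0, 2.0]])
# >>> b = jnp.array([[10.0, 0.0]])
# >>> jax_cosine_distance(a, b)
# Array([[1.], [0.]], dtype=float32)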
| 715
|
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
from types import MethodType
BITS = 8
# bit conversions adapted from https://github.com/lucidrains/bit-diffusion/blob/main/bit_diffusion/bit_diffusion.py
def decimal_to_bits(x, bits=BITS):
    """Expects an image tensor with values in [0, 1]; returns a bit tensor with values in {-1, 1}."""
    device = x.device
    x = (x * 255).int().clamp(0, 255)
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")
    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x, bits=BITS):
    """Expects a bit tensor with values in {-1, 1}; returns an image tensor with values in [0, 1]."""
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
def ddim_bit_scheduler_step(self, model_output, timestep, sample, eta=0.0, use_clipped_model_output=True, generator=None, return_dict=True):
    """DDIM step operating on bit-space samples; `self` is the (patched) scheduler."""
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler")
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read the DDIM paper in detail to understand the notation.
    # Notation (<variable name> -> <name in paper>)
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"
    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t
    # 3. compute predicted original sample from predicted noise, also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)
    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5
    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance
    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(self, model_output, timestep, sample, prediction_type="epsilon", generator=None, return_dict=True):
    """DDPM step operating on bit-space samples; `self` is the (patched) scheduler."""
    t = timestep
    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None
    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev
    # 2. compute predicted original sample from predicted noise, also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")
    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)
    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise
    pred_prev_sample = pred_prev_sample + variance
    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(self, unet, scheduler, bit_scale=1.0):
        super().__init__()
        self.bit_scale = bit_scale
        # the bit-space step functions above read `bit_scale` from the scheduler (`self`),
        # so the scheduler instance is patched and bound accordingly
        scheduler.bit_scale = bit_scale
        scheduler.step = MethodType(
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step,
            scheduler,
        )
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(self, height=256, width=256, num_inference_steps=50, generator=None, batch_size=1, output_type="pil", return_dict=True, **kwargs):
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width), generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample
        image = bits_to_decimal(latents)
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
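# Round-trip sanity sketch (illustrative, not part of the original pipeline):
# decimal_to_bits maps [0, 1] images to {-1, +1} bit planes (8 per channel) and
# bits_to_decimal inverts it up to 8-bit quantization.
# >>> img = torch.rand(1, 3, 4, 4)
# >>> bits = decimal_to_bits(img)   # shape (1, 24, 4, 4), values in {-1.0, 1.0}
# >>> torch.allclose(bits_to_decimal(bits), (img * 255).int().float() / 255)
# True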
| 180
| 0
|
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = '''\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
'''
_DESCRIPTION = '''\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
'''
_KWARGS_DESCRIPTION = '''
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
    predictions: list of generated text to score. Each prediction
should be a string with tokens separated by spaces.
    references: list of references, one for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer
    pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: "c" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric(\'mauve\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
    def _info( self ) -> Any:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/krishnap25/mauve""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/krishnap25/mauve"""] , reference_urls=[
"""https://arxiv.org/abs/2102.01454""",
"""https://github.com/krishnap25/mauve""",
] , )
    def _compute( self , predictions , references , p_features=None , q_features=None , p_tokens=None , q_tokens=None , num_buckets="auto" , pca_max_data=-1 , kmeans_explained_var=0.9 , kmeans_num_redo=5 , kmeans_max_iter=500 , featurize_model_name="gpt2-large" , device_id=-1 , max_text_length=1024 , divergence_curve_discretization_size=25 , mauve_scaling_factor=5 , verbose=True , seed=25 , ):
        '''simple docstring'''
        out = compute_mauve(
            p_text=predictions , q_text=references , p_features=p_features , q_features=q_features , p_tokens=p_tokens , q_tokens=q_tokens , num_buckets=num_buckets , pca_max_data=pca_max_data , kmeans_explained_var=kmeans_explained_var , kmeans_num_redo=kmeans_num_redo , kmeans_max_iter=kmeans_max_iter , featurize_model_name=featurize_model_name , device_id=device_id , max_text_length=max_text_length , divergence_curve_discretization_size=divergence_curve_discretization_size , mauve_scaling_factor=mauve_scaling_factor , verbose=verbose , seed=seed , )
        return out
| 694
|
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--rembert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained RemBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
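# Example invocation (paths are placeholders; the script file name is assumed):
# python convert_rembert_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/rembert/checkpoint \
#     --rembert_config_file /path/to/rembert/config.json \
#     --pytorch_dump_path /path/to/output/pytorch_model.bin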
| 694
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 705
|
'''simple docstring'''
class __a :
    def __init__(self, size: int) -> None:
        """Create a max-Fenwick tree over `size` zero-initialised entries."""
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size
    @staticmethod
    def get_next(index: int) -> int:
        """Index of the next tree node whose range covers `index`."""
        return index | (index + 1)
    @staticmethod
    def get_prev(index: int) -> int:
        """One position before the left border of the range stored at `index`."""
        return (index & (index + 1)) - 1
    def update(self, index: int, value: int) -> None:
        """Set arr[index] to value and refresh every tree node covering it."""
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                self.tree[index] = max(value, self.tree[index])
            index = self.get_next(index)
    def query(self, left: int, right: int) -> int:
        """Maximum of arr[left:right] (right is exclusive)."""
        right -= 1  # because `right` is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
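    # Usage sketch (illustrative values): point updates via `update`,
    # range-max queries via half-open `query(left, right)`.
    tree = __a(8)
    for i, v in enumerate([2, 5, 1, 9, 3, 0, 4, 7]):
        tree.update(i, v)
    assert tree.query(0, 4) == 9  # max of [2, 5, 1, 9]
    assert tree.query(4, 8) == 7  # max of [3, 0, 4, 7]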
| 644
| 0
|
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
    url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    res = requests.get(url, headers={'User-Agent': UserAgent().random})
    # res.raise_for_status()
    with open('project1a.html', 'wb') as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, 'html.parser')
    links = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
            webbrowser.open(f"https://google.com{link.get('href')}")
| 566
|
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    """Map every element of `vector` to the open interval (0, 1) via the logistic function."""
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
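    # Usage sketch (illustrative values): the logistic function maps R -> (0, 1),
    # with sigmoid(0) == 0.5 and sigmoid(-x) == 1 - sigmoid(x).
    assert np.allclose(sigmoid(np.array([0.0])), 0.5)
    assert np.allclose(sigmoid(np.array([2.0])) + sigmoid(np.array([-2.0])), 1.0)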
| 566
| 1
|
"""simple docstring"""
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    """Dummy tool used by the interpreter tests below."""
    return x + 2
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self )-> str:
UpperCAmelCase__ : Dict = "x = 3"
UpperCAmelCase__ : Optional[Any] = {}
UpperCAmelCase__ : int = evaluate(__UpperCamelCase , {} , state=__UpperCamelCase )
assert result == 3
self.assertDictEqual(__UpperCamelCase , {"x": 3} )
UpperCAmelCase__ : Optional[Any] = "x = y"
UpperCAmelCase__ : Optional[int] = {"y": 5}
UpperCAmelCase__ : Any = evaluate(__UpperCamelCase , {} , state=__UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__UpperCamelCase , {"x": 5, "y": 5} )
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ : Optional[Any] = "y = add_two(x)"
UpperCAmelCase__ : List[str] = {"x": 3}
UpperCAmelCase__ : Tuple = evaluate(__UpperCamelCase , {"add_two": add_two} , state=__UpperCamelCase )
assert result == 5
self.assertDictEqual(__UpperCamelCase , {"x": 3, "y": 5} )
# Won't work without the tool
with CaptureStdout() as out:
UpperCAmelCase__ : Any = evaluate(__UpperCamelCase , {} , state=__UpperCamelCase )
assert result is None
assert "tried to execute add_two" in out.out
def lowerCAmelCase__ ( self )-> Union[str, Any]:
UpperCAmelCase__ : Optional[Any] = "x = 3"
UpperCAmelCase__ : List[str] = {}
UpperCAmelCase__ : Dict = evaluate(__UpperCamelCase , {} , state=__UpperCamelCase )
assert result == 3
self.assertDictEqual(__UpperCamelCase , {"x": 3} )
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : List[str] = "test_dict = {'x': x, 'y': add_two(x)}"
UpperCAmelCase__ : Optional[Any] = {"x": 3}
UpperCAmelCase__ : int = evaluate(__UpperCamelCase , {"add_two": add_two} , state=__UpperCamelCase )
self.assertDictEqual(__UpperCamelCase , {"x": 3, "y": 5} )
self.assertDictEqual(__UpperCamelCase , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def lowerCAmelCase__ ( self )-> Tuple:
UpperCAmelCase__ : Optional[int] = "x = 3\ny = 5"
UpperCAmelCase__ : Optional[int] = {}
UpperCAmelCase__ : List[str] = evaluate(__UpperCamelCase , {} , state=__UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__UpperCamelCase , {"x": 3, "y": 5} )
def lowerCAmelCase__ ( self )-> Optional[int]:
UpperCAmelCase__ : Union[str, Any] = "text = f'This is x: {x}.'"
UpperCAmelCase__ : Any = {"x": 3}
UpperCAmelCase__ : Union[str, Any] = evaluate(__UpperCamelCase , {} , state=__UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(__UpperCamelCase , {"x": 3, "text": "This is x: 3."} )
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ : Union[str, Any] = "if x <= 3:\n y = 2\nelse:\n y = 5"
UpperCAmelCase__ : str = {"x": 3}
UpperCAmelCase__ : Union[str, Any] = evaluate(__UpperCamelCase , {} , state=__UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(__UpperCamelCase , {"x": 3, "y": 2} )
UpperCAmelCase__ : Tuple = {"x": 8}
UpperCAmelCase__ : str = evaluate(__UpperCamelCase , {} , state=__UpperCamelCase )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(__UpperCamelCase , {"x": 8, "y": 5} )
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : Optional[int] = "test_list = [x, add_two(x)]"
UpperCAmelCase__ : str = {"x": 3}
UpperCAmelCase__ : Tuple = evaluate(__UpperCamelCase , {"add_two": add_two} , state=__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , [3, 5] )
self.assertDictEqual(__UpperCamelCase , {"x": 3, "test_list": [3, 5]} )
def lowerCAmelCase__ ( self )-> Optional[Any]:
UpperCAmelCase__ : int = "y = x"
UpperCAmelCase__ : Optional[int] = {"x": 3}
UpperCAmelCase__ : Optional[int] = evaluate(__UpperCamelCase , {} , state=__UpperCamelCase )
assert result == 3
self.assertDictEqual(__UpperCamelCase , {"x": 3, "y": 3} )
def lowerCAmelCase__ ( self )-> List[str]:
UpperCAmelCase__ : Any = "test_list = [x, add_two(x)]\ntest_list[1]"
UpperCAmelCase__ : Tuple = {"x": 3}
UpperCAmelCase__ : str = evaluate(__UpperCamelCase , {"add_two": add_two} , state=__UpperCamelCase )
assert result == 5
self.assertDictEqual(__UpperCamelCase , {"x": 3, "test_list": [3, 5]} )
UpperCAmelCase__ : List[Any] = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
UpperCAmelCase__ : Union[str, Any] = {"x": 3}
UpperCAmelCase__ : Optional[int] = evaluate(__UpperCamelCase , {"add_two": add_two} , state=__UpperCamelCase )
assert result == 5
self.assertDictEqual(__UpperCamelCase , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def lowerCAmelCase__ ( self )-> Any:
UpperCAmelCase__ : Any = "x = 0\nfor i in range(3):\n x = i"
UpperCAmelCase__ : Optional[Any] = {}
UpperCAmelCase__ : str = evaluate(__UpperCamelCase , {"range": range} , state=__UpperCamelCase )
assert result == 2
self.assertDictEqual(__UpperCamelCase , {"x": 2, "i": 2} )
| 660
|
"""simple docstring"""
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by inspecting the least significant bit and shifting."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark():
    """Time both implementations on a few sample values."""
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)", setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")
    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
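    # Cross-check sketch (illustrative): both implementations agree with Python's
    # own popcount, bin(n).count("1"), on a few sample values.
    for n in (0, 1, 25, 37, 58, 2**20 - 1):
        assert get_set_bits_count_using_brian_kernighans_algorithm(n) == bin(n).count("1")
        assert get_set_bits_count_using_modulo_operator(n) == bin(n).count("1")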
| 660
| 1
|
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main():
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)
    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir, img_dir):
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list, anno_list, flip_type=1):
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char=32):
    assert number_char > 1, "The number of characters must be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('''DONE ✅''')
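    # A tiny worked example (illustrative numbers) of the YOLO-format flip used above:
    # boxes are (class, x_center, y_center, width, height) in relative [0, 1] coordinates,
    # so a horizontal flip only replaces x_center with 1 - x_center.
    box = [0, 0.25, 0.40, 0.10, 0.20]
    assert [box[0], 1 - box[1], box[2], box[3], box[4]] == [0, 0.75, 0.40, 0.10, 0.20]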
| 123
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __UpperCAmelCase ( PipelineTool ):
__lowercase = """dandelin/vilt-b32-finetuned-vqa"""
__lowercase = (
"""This is a tool that answers a question about an image. It takes an input named `image` which should be the """
"""image containing the information, as well as a `question` which should be the question in English. It """
"""returns a text that is the answer to the question."""
)
__lowercase = """image_qa"""
__lowercase = AutoProcessor
__lowercase = AutoModelForVisualQuestionAnswering
__lowercase = ["""image""", """text"""]
__lowercase = ["""text"""]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)
    def encode(self, image, question):
        return self.pre_processor(image, question, return_tensors="pt")
    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits
    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
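# Hypothetical usage sketch (image path and question are placeholders; instantiating
# the tool downloads the dandelin/vilt-b32-finetuned-vqa checkpoint):
# from PIL import Image
# tool = __UpperCAmelCase()  # the image-QA tool defined above
# print(tool(image=Image.open("photo.jpg"), question="What is in the picture?"))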
| 495
| 0
|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs,
        )
def _lowercase ( self: Dict ):
'''simple docstring'''
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
_lowerCamelCase : Optional[int] = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=lowerCAmelCase_ ,file_format=self._file_format ,)
return self.builder.as_dataset(split=self.split )
| 717
|
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=_a )
class A_ ( _a ):
lowerCAmelCase__ = field(default='image-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
lowerCAmelCase__ = Features({'image': Image()} )
lowerCAmelCase__ = Features({'labels': ClassLabel} )
lowerCAmelCase__ = "image"
lowerCAmelCase__ = "labels"
def _lowercase ( self: Dict ,__lowerCAmelCase: List[Any] ):
'''simple docstring'''
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] ,__lowerCAmelCase ):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
_lowerCamelCase : List[Any] = copy.deepcopy(self )
_lowerCamelCase : Union[str, Any] = self.label_schema.copy()
_lowerCamelCase : Optional[int] = features[self.label_column]
_lowerCamelCase : Tuple = label_schema
return task_template
@property
def _lowercase ( self: int ):
'''simple docstring'''
return {
self.image_column: "image",
self.label_column: "labels",
}
| 386
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _snake_case ( unittest.TestCase ):
@property
def snake_case__ ( self):
torch.manual_seed(0)
UpperCAmelCase__ : List[str] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def snake_case__ ( self):
UpperCAmelCase__ : int = self.dummy_uncond_unet
UpperCAmelCase__ : Dict = KarrasVeScheduler()
UpperCAmelCase__ : str = KarrasVePipeline(unet=_lowerCamelCase , scheduler=_lowerCamelCase)
pipe.to(_lowerCamelCase)
pipe.set_progress_bar_config(disable=_lowerCamelCase)
UpperCAmelCase__ : Any = torch.manual_seed(0)
UpperCAmelCase__ : Any = pipe(num_inference_steps=2 , generator=_lowerCamelCase , output_type="""numpy""").images
UpperCAmelCase__ : List[Any] = torch.manual_seed(0)
UpperCAmelCase__ : str = pipe(num_inference_steps=2 , generator=_lowerCamelCase , output_type="""numpy""" , return_dict=_lowerCamelCase)[0]
UpperCAmelCase__ : Any = image[0, -3:, -3:, -1]
UpperCAmelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase__ : Optional[int] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class _snake_case ( unittest.TestCase ):
def snake_case__ ( self):
UpperCAmelCase__ : int = """google/ncsnpp-celebahq-256"""
UpperCAmelCase__ : Optional[Any] = UNetaDModel.from_pretrained(_lowerCamelCase)
UpperCAmelCase__ : Dict = KarrasVeScheduler()
UpperCAmelCase__ : Dict = KarrasVePipeline(unet=_lowerCamelCase , scheduler=_lowerCamelCase)
pipe.to(_lowerCamelCase)
pipe.set_progress_bar_config(disable=_lowerCamelCase)
UpperCAmelCase__ : str = torch.manual_seed(0)
UpperCAmelCase__ : int = pipe(num_inference_steps=20 , generator=_lowerCamelCase , output_type="""numpy""").images
UpperCAmelCase__ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase__ : Any = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 407
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _snake_case ( a__ ):
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(self, batch_size=1, num_inference_steps=2000, generator=None, output_type="pil", return_dict=True, **kwargs):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)
        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)
            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample
            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample)
| 407
| 1
|
"""simple docstring"""
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """Binarize a grayscale PIL image at its global mean intensity."""
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    image = mean_threshold(Image.open('''path_to_image''').convert('''L'''))
image.save('''output_image_path''')
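    # Equivalent vectorized sketch with NumPy (illustrative; it thresholds at the float
    # mean, so boundary pixels may differ from the integer floor-mean used above):
    # import numpy as np
    # arr = np.asarray(Image.open('''path_to_image''').convert('''L'''))
    # binary = ((arr > arr.mean()) * 255).astype("uint8")
    # Image.fromarray(binary).save('''output_image_path''')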
| 717
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class _UpperCAmelCase ( unittest.TestCase ):
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=9_9 , lowercase_=3_2 , lowercase_=5 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=1_6 , lowercase_=2 , lowercase_=0.0_2 , lowercase_=4 , ) -> Dict:
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_attention_mask
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_choices
def a_ ( self ) -> int:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = None
if self.use_attention_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def a_ ( self ) -> List[str]:
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def a_ ( self ) -> List[Any]:
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = True
UpperCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = True
__SCREAMING_SNAKE_CASE : int = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def a_ ( self ) -> Optional[int]:
UpperCAmelCase = FlaxRobertaPreLayerNormModelTester(self )
@slow
def a_ ( self ) -> Dict:
for model_class_name in self.all_model_classes:
UpperCAmelCase = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowercase_ )
UpperCAmelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowercase_ )
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
@slow
def a_ ( self ) -> Tuple:
UpperCAmelCase = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowercase_ )
UpperCAmelCase = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
UpperCAmelCase = model(lowercase_ )[0]
UpperCAmelCase = [1, 1_1, 5_0_2_6_5]
self.assertEqual(list(output.shape ) , lowercase_ )
# compare the actual values for a slice.
UpperCAmelCase = np.array(
[[[4_0.4_8_8_0, 1_8.0_1_9_9, -5.2_3_6_7], [-1.8_8_7_7, -4.0_8_8_5, 1_0.7_0_8_5], [-2.2_6_1_3, -5.6_1_1_0, 7.2_6_6_5]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=1E-4 ) )
@slow
def a_ ( self ) -> int:
UpperCAmelCase = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=lowercase_ )
UpperCAmelCase = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
UpperCAmelCase = model(lowercase_ )[0]
# compare the actual values for a slice.
UpperCAmelCase = np.array(
[[[0.0_2_0_8, -0.0_3_5_6, 0.0_2_3_7], [-0.1_5_6_9, -0.0_4_1_1, -0.2_6_2_6], [0.1_8_7_9, 0.0_1_2_5, -0.0_0_8_9]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=1E-4 ) )
| 183
| 0
|
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    chkpt = torch.load(xlm_checkpoint_path, map_location='cpu')
    state_dict = chkpt['model']
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict['transformer.' + k] = v
    config = chkpt['params']
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}
    vocab = chkpt['dico_word2id']
    vocab = {s + '</w>' if s.find('@@') == -1 and i > 13 else s.replace('@@', ''): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(config, indent=2) + '\n')
    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, 'w', encoding='utf-8') as f:
        f.write(json.dumps(vocab, indent=2) + '\n')
if __name__ == "__main__":
__A : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xlm_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__A : Dict = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
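# Example invocation (paths are placeholders; the script file name is assumed):
# python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#     --xlm_checkpoint_path /path/to/xlm/checkpoint.pth \
#     --pytorch_dump_folder_path /path/to/output_dir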
| 343
|
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        # sift down every internal node, from the last parent up to the root
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is the min-heapify step
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        # swap the root with the last element, pop the old root and re-heapify
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


# Driver code: build a few nodes
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)
# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# my_min_heap.insert(a)
# my_min_heap.insert(b)
# my_min_heap.insert(x)
# my_min_heap.insert(r)
# my_min_heap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)
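# A small extra sketch (not part of the original driver): peek() reads the current
# minimum without removing it; remove() pops it and restores the heap property.
print("Min Heap - peek and remove demo")
print(my_min_heap.peek())    # after the decrease_key above, this is Node(B, -17)
print(my_min_heap.remove())  # pops that node and sifts a new minimum to the root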
if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
def a__ ( lowerCAmelCase : str ):
UpperCAmelCase__ : Optional[int] = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
UpperCAmelCase__ : str = set()
return any(
node not in visited and depth_first_search(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
for node in graph )
def a__ ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple ):
visited.add(_lowerCAmelCase )
rec_stk.add(_lowerCAmelCase )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(_lowerCAmelCase )
return False
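# A minimal usage sketch (graph values are illustrative): adjacency lists map each
# vertex to the vertices it points to; 0 -> 1 -> 2 -> 0 forms a cycle.
# >>> check_cycle({0: [1], 1: [2], 2: [0]})
# True
# >>> check_cycle({0: [1], 1: [2], 2: []})
# False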
if __name__ == "__main__":
    from doctest import testmod

    testmod()
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
"""simple docstring"""
def lowerCAmelCase_( lowercase_ : str , lowercase_ : Optional[Any] ) -> List[str]:
_lowerCamelCase = 0
_lowerCamelCase = len(lowercase_ ) - 1
while left <= right:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
_lowerCamelCase = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(lowercase_ ):
return None
_lowerCamelCase = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
_lowerCamelCase = left
_lowerCamelCase = point
elif point > right:
_lowerCamelCase = right
_lowerCamelCase = point
else:
if item < current_item:
_lowerCamelCase = point - 1
else:
_lowerCamelCase = point + 1
return None
def lowerCAmelCase_( lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : Any ) -> Dict:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
_lowerCamelCase = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(lowercase_ ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
elif point > right:
return interpolation_search_by_recursion(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
lowercase_ , lowercase_ , lowercase_ , point - 1 )
else:
return interpolation_search_by_recursion(
lowercase_ , lowercase_ , point + 1 , lowercase_ )
def lowerCAmelCase_( lowercase_ : Union[str, Any] ) -> Union[str, Any]:
if collection != sorted(lowercase_ ):
raise ValueError('''Collection must be ascending sorted''' )
return True
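# A short sketch of the recursive variant (values illustrative); the initial call
# passes the full index range explicitly:
# >>> interpolation_search_by_recursion([10, 30, 40, 45, 50, 66, 77, 93], 45, 0, 7)
# 3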
if __name__ == "__main__":
    import sys

    # set debug to 1 to run the sortedness check before searching
    debug = 0
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
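# A quick sketch (not part of the original module): constructing the deprecated
# class still works, but it emits the FutureWarning above.
#
#   from transformers import DeiTFeatureExtractor
#   feature_extractor = DeiTFeatureExtractor()  # FutureWarning: use DeiTImageProcessor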
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # build a random init image and a shifted mask image from the same tensor
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, torch_dtype=torch.float16, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=scheduler, torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
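# A compact usage sketch distilled from the integration tests above (GPU assumed;
# init_image and mask_image are PIL images like the ones loaded in the tests):
#
#   pipe = StableDiffusionInpaintPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
#   ).to("cuda")
#   prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
#   image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]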
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        """Returns a visual representation of the node and all its following nodes."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list):
    """Creates a Linked List from the elements of the given sequence
    (list/tuple) and returns the head of the Linked List."""
    # if elements_list is empty
    if not elements_list:
        raise Exception("The Elements List is empty")

    # Set first element as Head
    head = Node(elements_list[0])
    current = head
    # Loop through elements from position 1
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    """Prints the elements of the given Linked List in reverse order."""
    # If reached end of the List
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)
if __name__ == "__main__":
    main()
"""XGBoost regression example on the California housing dataset."""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split dataset into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
"""Tests for the PyTorch UMT5 model."""
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
    torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(
        self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, is_training=True, use_attention_mask=True, use_labels=False, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id,
        )

    def create_and_check_model(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fp16_forward(self, config, input_dict):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f"{tmpdirname}/t5_test.onnx", export_params=True, opset_version=9, input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)

    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"], num_beams=1, max_length=3, output_attentions=True, return_dict_in_generate=True, **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)

        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
            "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
            "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
        ]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
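# A hedged sketch of the span-filling usage the integration test checks (checkpoint
# name as in the test; the generated text varies with the model version):
#
#   model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small")
#   tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
#   ids = tokenizer("This is the reason why we <extra_id_0> them.", return_tensors="pt").input_ids
#   print(tokenizer.batch_decode(model.generate(ids)))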
import os
def solution():
    """Finds the maximum total of a top-to-bottom path through the triangle
    stored in triangle.txt (Project Euler style)."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    # each cell becomes the best sum achievable when reaching it from the row above
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
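# A tiny worked example of the same recurrence (triangle values illustrative):
# for the triangle
#       3
#      7 4
#     2 4 6
# row 1 becomes [10, 7], row 2 becomes [12, 14, 13], and the answer is 14
# (the path 3 -> 7 -> 4).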
if __name__ == "__main__":
    print(solution())
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
    import torch

    from transformers import TimmBackbone, TimmBackboneConfig

    from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self, parent, out_indices=None, out_features=None, stage_names=None, backbone="resnet50", batch_size=3, image_size=32, num_channels=3, is_training=True, use_pretrained_backbone=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape, (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)

    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_missing_tied_weights(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
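# A short sketch of the timm bridge exercised above (checkpoint name as in the
# test; channel counts depend on the installed timm version):
#
#   backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
#   outputs = backbone(pixel_values)      # pixel_values: a (batch, 3, H, W) float tensor
#   feature_maps = outputs.feature_maps   # one tensor per requested stage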
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self, vocab_size=32128, d_model=768, d_kv=64, d_ff=2048, expert_capacity=64, num_layers=12, num_sparse_encoder_layers=3, num_decoder_layers=12, num_sparse_decoder_layers=3, num_heads=12, num_experts=8, router_bias=False, router_jitter_noise=0.01, router_dtype="float32", router_ignore_padding_tokens=False, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, initializer_factor=1.0, feed_forward_proj="relu", is_encoder_decoder=True, add_router_probs=False, use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us how many encoder layers sit between consecutive sparse encoder layers.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us how many decoder layers sit between consecutive sparse decoder layers.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs,
        )
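# A minimal sketch (the parameter names come from the signature above; the values
# are illustrative):
#
#   config = SwitchTransformersConfig(num_layers=6, num_sparse_encoder_layers=3)
#   print(config.encoder_sparse_step)  # -> 2: every 2nd encoder layer is sparse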
"""Directed and undirected (weighted) graph implementations."""
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges; the weight is optional and defaults to 1
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; leave it or pass -1 for a random count
    # between 10 and 10000
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class __UpperCAmelCase :
def __init__( self ):
lowerCAmelCase_ = {}
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=1 ):
# check if the u exists
if self.graph.get(_lowerCamelCase ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
lowerCAmelCase_ = [[w, v]]
# add the other way
if self.graph.get(_lowerCamelCase ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
lowerCAmelCase_ = [[w, u]]
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ):
if self.graph.get(_lowerCamelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_lowerCamelCase )
# the other way round
if self.graph.get(_lowerCamelCase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase=-2 , _lowerCamelCase=-1 ):
if s == d:
return []
lowerCAmelCase_ = []
lowerCAmelCase_ = []
if s == -2:
lowerCAmelCase_ = list(self.graph )[0]
stack.append(_lowerCamelCase )
visited.append(_lowerCamelCase )
lowerCAmelCase_ = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
lowerCAmelCase_ = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_lowerCamelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowerCAmelCase_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_lowerCamelCase ) != 0:
lowerCAmelCase_ = stack[len(_lowerCamelCase ) - 1]
else:
lowerCAmelCase_ = ss
# check if se have reached the starting point
if len(_lowerCamelCase ) == 0:
return visited
def UpperCAmelCase_ ( self , _lowerCamelCase=-1 ):
if c == -1:
lowerCAmelCase_ = floor(random() * 1_0000 ) + 10
for i in range(_lowerCamelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowerCAmelCase_ = floor(random() * c ) + 1
if n != i:
self.add_pair(_lowerCamelCase , _lowerCamelCase , 1 )
def bfs( self , s=-2 ):
d = deque()
visited = []
if s == -2:
s = list(self.graph )[0]
d.append(s )
visited.append(s )
while d:
s = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def degree( self , u ):
return len(self.graph[u] )
def cycle_nodes( self ):
lowerCAmelCase_ = []
lowerCAmelCase_ = []
lowerCAmelCase_ = list(self.graph )[0]
stack.append(_lowerCamelCase )
visited.append(_lowerCamelCase )
lowerCAmelCase_ = -2
lowerCAmelCase_ = []
lowerCAmelCase_ = s
lowerCAmelCase_ = False
lowerCAmelCase_ = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCAmelCase_ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
len_stack = len(stack ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCAmelCase_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCAmelCase_ = True
if len(_lowerCamelCase ) != 0:
lowerCAmelCase_ = stack[len(_lowerCamelCase ) - 1]
else:
lowerCAmelCase_ = False
indirect_parents.append(_lowerCamelCase )
lowerCAmelCase_ = s
lowerCAmelCase_ = ss
# check if we have reached the starting point
if len(_lowerCamelCase ) == 0:
return list(anticipating_nodes )
def has_cycle( self ):
lowerCAmelCase_ = []
lowerCAmelCase_ = []
lowerCAmelCase_ = list(self.graph )[0]
stack.append(_lowerCamelCase )
visited.append(_lowerCamelCase )
lowerCAmelCase_ = -2
lowerCAmelCase_ = []
lowerCAmelCase_ = s
lowerCAmelCase_ = False
lowerCAmelCase_ = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCAmelCase_ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
len_stack_minus_one = len(stack ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
len_stack_minus_one -= 1
# a back edge to an ancestor was found, so the graph contains a cycle
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCAmelCase_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCAmelCase_ = True
if len(_lowerCamelCase ) != 0:
lowerCAmelCase_ = stack[len(_lowerCamelCase ) - 1]
else:
lowerCAmelCase_ = False
indirect_parents.append(_lowerCamelCase )
lowerCAmelCase_ = s
lowerCAmelCase_ = ss
# check if we have reached the starting point
if len(_lowerCamelCase ) == 0:
return False
def all_nodes( self ):
return list(self.graph )
def dfs_time( self , s=-2 , e=-1 ):
begin = time()
self.dfs(s , e )
end = time()
return end - begin
def bfs_time( self , s=-2 ):
begin = time()
self.bfs(s )
end = time()
return end - begin
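# A minimal, runnable sketch of the cycle-detection idea used by the two graph
# classes above (a new helper, not part of the original file; it assumes an
# adjacency dict mapping each vertex to a list of [weight, vertex] pairs, the
# shape built by add_pair):
def has_cycle_undirected(graph):
    visited = set()

    def dfs(vertex, parent):
        visited.add(vertex)
        for _weight, neighbour in graph.get(vertex, []):
            if neighbour not in visited:
                if dfs(neighbour, vertex):
                    return True
            elif neighbour != parent:
                # a visited neighbour other than the DFS parent closes a cycle
                return True
        return False

    return any(dfs(v, None) for v in graph if v not in visited)


assert not has_cycle_undirected({0: [[1, 1]], 1: [[1, 0]]})
assert has_cycle_undirected(
    {0: [[1, 1], [1, 2]], 1: [[1, 0], [1, 2]], 2: [[1, 1], [1, 0]]}
)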
| 274
| 0
|
def sum_digits(num: int) -> int:
    '''Return the sum of the decimal digits of num.'''
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    '''Project Euler 65: digit sum of the numerator of the max_n-th convergent
    of the continued fraction for e.'''
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"""{solution() = }""")
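# Sanity check (a known value for this problem): the numerator of the 10th
# convergent of the continued fraction for e is 1457, so its digit sum is
# 1 + 4 + 5 + 7 = 17.
assert solution(10) == 17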
| 706
|
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem ( AbstractFileSystem ):
'''simple docstring'''
_A = ''
_A = 'hf-legacy' # "hf://" is reserved for hffs
def __init__( self , repo_info = None , token = None , **kwargs , )-> None:
super().__init__(self , **kwargs )
self.repo_info = repo_info
self.token = token
self.dir_cache = None
def _get_dirs( self )-> None:
if self.dir_cache is None:
self.dir_cache = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
self.dir_cache[hf_file.rfilename] = {
"name": hf_file.rfilename,
"size": None,
"type": "file",
}
self.dir_cache.update(
{
str(d ): {"name": str(d ), "size": None, "type": "directory"}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def _open( self , path , mode = "rb" , **kwargs , ):
if not isinstance(self.repo_info , DatasetInfo ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
url = hf_hub_url(self.repo_info.id , path , revision=self.repo_info.sha )
return fsspec.open(
url , mode=mode , headers=get_authentication_headers_for_url(url , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open()
def info( self , path , **kwargs ):
self._get_dirs()
path = self._strip_protocol(path )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(path )
def ls( self , path , detail = False , **kwargs ):
self._get_dirs()
path = PurePosixPath(path.strip("/" ) )
paths = {}
for p, f in self.dir_cache.items():
p = PurePosixPath(p.strip("/" ) )
root = p.parent
if root == path:
paths[str(p )] = f
out = list(paths.values() )
if detail:
return out
else:
return sorted(f["name"] for f in out )
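# Illustration of the directory cache that _get_dirs builds above (a sketch,
# not part of the original file): every sibling file becomes a "file" entry
# and each of its parent directories becomes a "directory" entry. The sibling
# names below are hypothetical.
from pathlib import PurePosixPath

siblings = ["data/train.json", "README.md"]
dir_cache = {}
for rfilename in siblings:
    dir_cache[rfilename] = {"name": rfilename, "size": None, "type": "file"}
    dir_cache.update(
        {
            str(d): {"name": str(d), "size": None, "type": "directory"}
            for d in list(PurePosixPath(rfilename).parents)[:-1]
        }
    )
print(sorted(dir_cache))  # ['README.md', 'data', 'data/train.json']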
| 660
| 0
|
"""simple docstring"""
import datasets
from .evaluate import evaluate
snake_case = '''\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
'''
snake_case = '''
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''
snake_case = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the CUAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
\'aupr\': Area Under the Precision-Recall curve
\'prec_at_80_recall\': Precision at 80% recall
\'prec_at_90_recall\': Precision at 90% recall
Examples:
>>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> cuad_metric = datasets.load_metric("cuad")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION,_KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://www.atticusprojectai.org/cuad'''] , reference_urls=['''https://www.atticusprojectai.org/cuad'''] , )
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] ):
"""simple docstring"""
_snake_case = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
_snake_case = [
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
_snake_case = evaluate(dataset=__lowerCamelCase , predictions=__lowerCamelCase )
return score
| 103
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = StableDiffusionLDMaDPipeline
UpperCamelCase_ = TEXT_TO_IMAGE_PARAMS
UpperCamelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase_ ( self : Tuple ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase_ : Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=32 ,)
lowerCAmelCase_ : Any = DDIMScheduler(
beta_start=0.00_085 ,beta_end=0.012 ,beta_schedule="scaled_linear" ,clip_sample=lowerCAmelCase__ ,set_alpha_to_one=lowerCAmelCase__ ,)
torch.manual_seed(0 )
lowerCAmelCase_ : str = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=6 ,out_channels=6 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,)
torch.manual_seed(0 )
lowerCAmelCase_ : Optional[Any] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,)
lowerCAmelCase_ : Optional[int] = CLIPTextModel(lowerCAmelCase__ )
lowerCAmelCase_ : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCAmelCase_ : List[Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : List[str]=0 ) -> Dict:
'''simple docstring'''
if str(lowerCAmelCase__ ).startswith("mps" ):
lowerCAmelCase_ : Optional[int] = torch.manual_seed(lowerCAmelCase__ )
else:
lowerCAmelCase_ : Dict = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
lowerCAmelCase_ : str = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ : List[str] = self.get_dummy_components()
lowerCAmelCase_ : Union[str, Any] = StableDiffusionLDMaDPipeline(**lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = ldmad_pipe.to(lowerCAmelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : Any = self.get_dummy_inputs(lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = ldmad_pipe(**lowerCAmelCase__ )
rgb, depth = output.rgb, output.depth
image_slice_rgb = rgb[0, -3:, -3:, -1]
image_slice_depth = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
expected_slice_rgb = np.array(
[0.37_338_176, 0.70_247, 0.74_203_193, 0.51_643_604, 0.58_256_793, 0.60_932_136, 0.4_181_095, 0.48_355_877, 0.46_535_262] )
expected_slice_depth = np.array([103.46_727, 85.812_004, 87.849_236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
def UpperCAmelCase_ ( self : int ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Dict = self.get_dummy_components()
lowerCAmelCase_ : List[str] = StableDiffusionLDMaDPipeline(**lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = ldmad_pipe.to(lowerCAmelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
lowerCAmelCase_ : str = 3 * [inputs["prompt"]]
# forward
lowerCAmelCase_ : Union[str, Any] = ldmad_pipe(**lowerCAmelCase__ )
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = output.rgb, output.depth
lowerCAmelCase_ : str = rgb_slice_a[0, -3:, -3:, -1]
lowerCAmelCase_ : List[str] = depth_slice_a[0, -3:, -1]
lowerCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = 3 * [inputs.pop("prompt" )]
lowerCAmelCase_ : str = ldmad_pipe.tokenizer(
lowerCAmelCase__ ,padding="max_length" ,max_length=ldmad_pipe.tokenizer.model_max_length ,truncation=lowerCAmelCase__ ,return_tensors="pt" ,)
lowerCAmelCase_ : Union[str, Any] = text_inputs["input_ids"].to(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = ldmad_pipe.text_encoder(lowerCAmelCase__ )[0]
lowerCAmelCase_ : Optional[int] = prompt_embeds
# forward
lowerCAmelCase_ : str = ldmad_pipe(**lowerCAmelCase__ )
lowerCAmelCase_ , lowerCAmelCase_ : str = output.rgb, output.depth
lowerCAmelCase_ : Optional[Any] = rgb_slice_a[0, -3:, -3:, -1]
lowerCAmelCase_ : Tuple = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ : Optional[int] = self.get_dummy_components()
lowerCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = StableDiffusionLDMaDPipeline(**lowerCAmelCase__ )
lowerCAmelCase_ : Any = ldmad_pipe.to(lowerCAmelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = self.get_dummy_inputs(lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = "french fries"
lowerCAmelCase_ : Optional[int] = ldmad_pipe(**lowerCAmelCase__ ,negative_prompt=lowerCAmelCase__ )
rgb, depth = output.rgb, output.depth
rgb_slice = rgb[0, -3:, -3:, -1]
depth_slice = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
expected_slice_rgb = np.array(
[0.37_044, 0.71_811_503, 0.7_223_251, 0.48_603_675, 0.5_638_391, 0.6_364_948, 0.42_833_704, 0.4_901_315, 0.47_926_217] )
expected_slice_depth = np.array([107.84_738, 84.62_802, 89.962_135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : Tuple ,lowerCAmelCase__ : Dict="cpu" ,lowerCAmelCase__ : Union[str, Any]=torch.floataa ,lowerCAmelCase__ : List[str]=0 ) -> int:
'''simple docstring'''
lowerCAmelCase_ : Any = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = np.random.RandomState(lowerCAmelCase__ ).standard_normal((1, 4, 64, 64) )
lowerCAmelCase_ : Optional[Any] = torch.from_numpy(lowerCAmelCase__ ).to(device=lowerCAmelCase__ ,dtype=lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase_ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" )
lowerCAmelCase_ : List[str] = ldmad_pipe.to(lowerCAmelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : Dict = self.get_inputs(lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = ldmad_pipe(**lowerCAmelCase__ )
rgb, depth = output.rgb, output.depth
rgb_slice = rgb[0, -3:, -3:, -1].flatten()
depth_slice = depth[0, -3:, -1].flatten()
assert rgb.shape == (1, 5_12, 5_12, 3)
assert depth.shape == (1, 5_12, 5_12)
expected_slice_rgb = np.array(
[0.53_805_465, 0.56_707_305, 0.5_486_515, 0.57_012_236, 0.5_814_511, 0.56_253_487, 0.54_843_014, 0.55_092_263, 0.6_459_706] )
expected_slice_depth = np.array(
[0.9_263_781, 0.6_678_672, 0.5_486_515, 0.92_202_145, 0.67_831_135, 0.56_253_487, 0.9_241_694, 0.7_551_478, 0.6_459_706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Tuple ,lowerCAmelCase__ : Dict="cpu" ,lowerCAmelCase__ : List[str]=torch.floataa ,lowerCAmelCase__ : Optional[int]=0 ) -> int:
'''simple docstring'''
lowerCAmelCase_ : Dict = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = np.random.RandomState(lowerCAmelCase__ ).standard_normal((1, 4, 64, 64) )
lowerCAmelCase_ : Any = torch.from_numpy(lowerCAmelCase__ ).to(device=lowerCAmelCase__ ,dtype=lowerCAmelCase__ )
lowerCAmelCase_ : int = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 50,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase_ ( self : Dict ) -> int:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" ).to(lowerCAmelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = self.get_inputs(lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = ldmad_pipe(**lowerCAmelCase__ )
rgb, depth = output.rgb, output.depth
expected_rgb_mean = 0.495_586
expected_rgb_std = 0.33_795_515
expected_depth_mean = 112.48_518
expected_depth_std = 98.489_746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : int = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c" ).to(lowerCAmelCase__ )
ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : str = self.get_inputs(lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = ldmad_pipe(**lowerCAmelCase__ )
rgb, depth = output.rgb, output.depth
expected_rgb_mean = 0.4_194_127
expected_rgb_std = 0.35_375_586
expected_depth_mean = 0.5_638_502
expected_depth_std = 0.34_686_103
assert rgb.shape == (1, 5_12, 5_12, 3)
assert depth.shape == (1, 5_12, 5_12, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
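# A hedged end-to-end sketch of what these tests exercise, using the public
# diffusers API and the "Intel/ldm3d" checkpoint referenced above (requires
# downloading the weights; a GPU is assumed for reasonable speed):
import torch
from diffusers import StableDiffusionLDMaDPipeline

pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
output = pipe(
    "a photograph of an astronaut riding a horse",
    num_inference_steps=50,
    guidance_scale=7.5,
)
rgb, depth = output.rgb, output.depth  # the pipeline returns RGB and depth jointly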
| 659
| 0
|
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def A ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase__ :int = fname.split(os.path.sep )[-1]
return re.search(r'^(.*)_\d+\.jpg$' , SCREAMING_SNAKE_CASE ).groups()[0]
class UpperCamelCase__ ( UpperCAmelCase__):
'''simple docstring'''
def __init__( self , A , A=None , A=None ) ->Any:
UpperCAmelCase__ :int = file_names
UpperCAmelCase__ :List[str] = image_transform
UpperCAmelCase__ :Optional[int] = label_to_id
def __len__( self ) ->str:
return len(self.file_names )
def __getitem__( self , A ) ->Tuple:
UpperCAmelCase__ :str = self.file_names[idx]
UpperCAmelCase__ :Optional[Any] = PIL.Image.open(A )
UpperCAmelCase__ :Tuple = raw_image.convert('RGB' )
if self.image_transform is not None:
UpperCAmelCase__ :int = self.image_transform(A )
UpperCAmelCase__ :Tuple = extract_label(A )
if self.label_to_id is not None:
UpperCAmelCase__ :str = self.label_to_id[label]
return {"image": image, "label": label}
def A ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if args.with_tracking:
UpperCAmelCase__ :Optional[int] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
UpperCAmelCase__ :Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase__ :Union[str, Any] = config['lr']
UpperCAmelCase__ :Optional[int] = int(config['num_epochs'] )
UpperCAmelCase__ :Union[str, Any] = int(config['seed'] )
UpperCAmelCase__ :Optional[int] = int(config['batch_size'] )
UpperCAmelCase__ :Dict = config['image_size']
if not isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ):
UpperCAmelCase__ :List[str] = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , 'isdigit' ):
if args.checkpointing_steps == "epoch":
UpperCAmelCase__ :int = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
UpperCAmelCase__ :Optional[int] = int(args.checkpointing_steps )
else:
raise ValueError(
f"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
UpperCAmelCase__ :Tuple = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
UpperCAmelCase__ :Optional[int] = os.path.split(SCREAMING_SNAKE_CASE )[-1].split('.' )[0]
accelerator.init_trackers(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Grab all the image filenames
UpperCAmelCase__ :int = [os.path.join(args.data_dir , SCREAMING_SNAKE_CASE ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )]
# Build the label correspondences
UpperCAmelCase__ :List[str] = [extract_label(SCREAMING_SNAKE_CASE ) for fname in file_names]
UpperCAmelCase__ :List[str] = list(set(SCREAMING_SNAKE_CASE ) )
id_to_label.sort()
UpperCAmelCase__ :Tuple = {lbl: i for i, lbl in enumerate(SCREAMING_SNAKE_CASE )}
# Set the seed before splitting the data.
np.random.seed(SCREAMING_SNAKE_CASE )
torch.manual_seed(SCREAMING_SNAKE_CASE )
torch.cuda.manual_seed_all(SCREAMING_SNAKE_CASE )
# Split our filenames between train and validation
UpperCAmelCase__ :Dict = np.random.permutation(len(SCREAMING_SNAKE_CASE ) )
UpperCAmelCase__ :int = int(0.8 * len(SCREAMING_SNAKE_CASE ) )
UpperCAmelCase__ :Tuple = random_perm[:cut]
UpperCAmelCase__ :List[str] = random_perm[cut:]
# For training we use a simple RandomResizedCrop
UpperCAmelCase__ :str = Compose([RandomResizedCrop(SCREAMING_SNAKE_CASE , scale=(0.5, 1.0) ), ToTensor()] )
UpperCAmelCase__ :Union[str, Any] = PetsDataset(
[file_names[i] for i in train_split] , image_transform=SCREAMING_SNAKE_CASE , label_to_id=SCREAMING_SNAKE_CASE )
# For evaluation, we use a deterministic Resize
UpperCAmelCase__ :int = Compose([Resize(SCREAMING_SNAKE_CASE ), ToTensor()] )
UpperCAmelCase__ :Any = PetsDataset([file_names[i] for i in eval_split] , image_transform=SCREAMING_SNAKE_CASE , label_to_id=SCREAMING_SNAKE_CASE )
# Instantiate dataloaders.
UpperCAmelCase__ :Tuple = DataLoader(SCREAMING_SNAKE_CASE , shuffle=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , num_workers=4 )
UpperCAmelCase__ :Dict = DataLoader(SCREAMING_SNAKE_CASE , shuffle=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase__ :Dict = create_model('resnet50d' , pretrained=SCREAMING_SNAKE_CASE , num_classes=len(SCREAMING_SNAKE_CASE ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase__ :Tuple = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
UpperCAmelCase__ :List[Any] = False
for param in model.get_classifier().parameters():
UpperCAmelCase__ :Any = True
# We normalize the batches of images to be a bit faster.
UpperCAmelCase__ :Any = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device )
UpperCAmelCase__ :int = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase__ :Tuple = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
UpperCAmelCase__ :List[Any] = OneCycleLR(optimizer=SCREAMING_SNAKE_CASE , max_lr=SCREAMING_SNAKE_CASE , epochs=SCREAMING_SNAKE_CASE , steps_per_epoch=len(SCREAMING_SNAKE_CASE ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase__ :Optional[int] = accelerator.prepare(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase__ :Dict = 0
# We also need to keep track of the starting epoch so files are named properly
UpperCAmelCase__ :str = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(f"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
UpperCAmelCase__ :Any = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
UpperCAmelCase__ :int = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
UpperCAmelCase__ :int = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
UpperCAmelCase__ :Union[str, Any] = os.path.splitext(SCREAMING_SNAKE_CASE )[0]
if "epoch" in training_difference:
UpperCAmelCase__ :str = int(training_difference.replace('epoch_' , '' ) ) + 1
UpperCAmelCase__ :Dict = None
else:
UpperCAmelCase__ :Optional[int] = int(training_difference.replace('step_' , '' ) )
UpperCAmelCase__ :List[str] = resume_step // len(SCREAMING_SNAKE_CASE )
resume_step -= starting_epoch * len(SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
model.train()
if args.with_tracking:
UpperCAmelCase__ :str = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
UpperCAmelCase__ :Union[str, Any] = accelerator.skip_first_batches(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
UpperCAmelCase__ :Optional[Any] = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
UpperCAmelCase__ :Dict = {k: v.to(accelerator.device ) for k, v in batch.items()}
UpperCAmelCase__ :List[Any] = (batch['image'] - mean) / std
UpperCAmelCase__ :Dict = model(SCREAMING_SNAKE_CASE )
UpperCAmelCase__ :str = torch.nn.functional.cross_entropy(SCREAMING_SNAKE_CASE , batch['label'] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(SCREAMING_SNAKE_CASE )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCAmelCase__ :Union[str, Any] = f"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
UpperCAmelCase__ :Tuple = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE )
accelerator.save_state(SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase__ :List[Any] = 0
UpperCAmelCase__ :Optional[int] = 0
for step, batch in enumerate(SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
UpperCAmelCase__ :Optional[int] = {k: v.to(accelerator.device ) for k, v in batch.items()}
UpperCAmelCase__ :Optional[int] = (batch['image'] - mean) / std
with torch.no_grad():
UpperCAmelCase__ :Dict = model(SCREAMING_SNAKE_CASE )
UpperCAmelCase__ :Dict = outputs.argmax(dim=-1 )
UpperCAmelCase__ :List[Any] = accelerator.gather_for_metrics((predictions, batch['label']) )
UpperCAmelCase__ :Dict = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
UpperCAmelCase__ :Dict = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}: {100 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
'accuracy': 100 * eval_metric,
'train_loss': total_loss.item() / len(SCREAMING_SNAKE_CASE ),
'epoch': epoch,
} , step=SCREAMING_SNAKE_CASE , )
if checkpointing_steps == "epoch":
UpperCAmelCase__ :Union[str, Any] = f"""epoch_{epoch}"""
if args.output_dir is not None:
UpperCAmelCase__ :List[str] = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE )
accelerator.save_state(SCREAMING_SNAKE_CASE )
if args.with_tracking:
accelerator.end_training()
def A ( ):
"""simple docstring"""
UpperCAmelCase__ :List[str] = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument('--data_dir' , required=SCREAMING_SNAKE_CASE , help='The data folder on disk.' )
parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' )
parser.add_argument(
'--mixed_precision' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose '
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--checkpointing_steps' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , )
parser.add_argument(
'--output_dir' , type=SCREAMING_SNAKE_CASE , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
'--project_dir' , type=SCREAMING_SNAKE_CASE , default='logs' , help='Location on where to store experiment tracking logs and relevant project information' , )
UpperCAmelCase__ :Optional[int] = parser.parse_args()
UpperCAmelCase__ :str = {'lr': 3E-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224}
training_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
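# Quick illustration of the extract_label helper defined at the top of this
# script: Oxford-IIIT Pet filenames encode the class name before the final
# underscore-number suffix (standalone sketch using "/" instead of os.path.sep).
import re

def extract_label(fname: str) -> str:
    stem = fname.split("/")[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]

assert extract_label("images/great_pyrenees_154.jpg") == "great_pyrenees"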
| 707
|
import argparse
import struct
import unittest
class SHA256 :
'''simple docstring'''
def __init__( self , data ) ->None:
self.data = data
# Initialize hash values
self.hashes = [
0x6a_09e_667,
0xbb_67a_e85,
0x3c_6ef_372,
0xa5_4ff_53a,
0x51_0e5_27f,
0x9b_056_88c,
0x1f_83d_9ab,
0x5b_e0c_d19,
]
# Initialize round constants
self.round_constants = [
0x42_8a2_f98,
0x71_374_491,
0xb5_c0f_bcf,
0xe9_b5d_ba5,
0x39_56c_25b,
0x59_f11_1f1,
0x92_3f8_2a4,
0xab_1c5_ed5,
0xd8_07a_a98,
0x12_835_b01,
0x24_318_5be,
0x55_0c7_dc3,
0x72_be5_d74,
0x80_deb_1fe,
0x9b_dc0_6a7,
0xc1_9bf_174,
0xe4_9b6_9c1,
0xef_be4_786,
0x0f_c19_dc6,
0x24_0ca_1cc,
0x2d_e92_c6f,
0x4a_748_4aa,
0x5c_b0a_9dc,
0x76_f98_8da,
0x98_3e5_152,
0xa8_31c_66d,
0xb0_032_7c8,
0xbf_597_fc7,
0xc6_e00_bf3,
0xd5_a79_147,
0x06_ca6_351,
0x14_292_967,
0x27_b70_a85,
0x2e_1b2_138,
0x4d_2c6_dfc,
0x53_380_d13,
0x65_0a7_354,
0x76_6a0_abb,
0x81_c2c_92e,
0x92_722_c85,
0xa2_bfe_8a1,
0xa8_1a6_64b,
0xc2_4b8_b70,
0xc7_6c5_1a3,
0xd1_92e_819,
0xd6_990_624,
0xf4_0e3_585,
0x10_6aa_070,
0x19_a4c_116,
0x1e_376_c08,
0x27_487_74c,
0x34_b0b_cb5,
0x39_1c0_cb3,
0x4e_d8a_a4a,
0x5b_9cc_a4f,
0x68_2e6_ff3,
0x74_8f8_2ee,
0x78_a56_36f,
0x84_c87_814,
0x8c_c70_208,
0x90_bef_ffa,
0xa4_506_ceb,
0xbe_f9a_3f7,
0xc6_717_8f2,
]
self.preprocessed_data = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def preprocessing( data ) ->bytes:
padding = b'\x80' + (b'\x00' * (63 - (len(data ) + 8) % 64))
big_endian_integer = struct.pack('>Q' , (len(data ) * 8) )
return data + padding + big_endian_integer
def final_hash( self ) ->None:
# Convert into blocks of 64 bytes
self.blocks = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
words = list(struct.unpack('>16L' , block ) )
# add 48 0-ed integers
words += [0] * 48
a, b, c, d, e, f, g, h = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
s0 = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
s1 = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
words[index] = (
words[index - 16] + s0 + words[index - 7] + s1
) % 0x100_000_000
# Compression
s1 = self.ror(e , 6 ) ^ self.ror(e , 11 ) ^ self.ror(e , 25 )
ch = (e & f) ^ ((~e & 0xff_fff_fff) & g)
temp1 = (
h + s1 + ch + self.round_constants[index] + words[index]
) % 0x100_000_000
s0 = self.ror(a , 2 ) ^ self.ror(a , 13 ) ^ self.ror(a , 22 )
maj = (a & b) ^ (a & c) ^ (b & c)
temp2 = (s0 + maj) % 0x100_000_000
h, g, f, e, d, c, b, a = (
g,
f,
e,
((d + temp1) % 0x100_000_000),
c,
b,
a,
((temp1 + temp2) % 0x100_000_000),
)
mutated_hash_values = [a, b, c, d, e, f, g, h]
# Modify final values
self.hashes = [
((element + mutated_hash_values[index]) % 0x100_000_000)
for index, element in enumerate(self.hashes )
]
self.hash = ''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
def ror( self , value , rotations ) ->int:
return 0xff_fff_fff & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest ( unittest.TestCase):
'''simple docstring'''
def test_match_hashes( self ) ->None:
import hashlib
data = bytes('Test String' , 'utf-8' )
self.assertEqual(SHA256(data ).hash , hashlib.sha256(data ).hexdigest() )
def A ( ):
"""simple docstring"""
import doctest
doctest.testmod()
parser = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
args = parser.parse_args()
input_string = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
hash_input = f.read()
else:
hash_input = bytes(input_string , 'utf-8' )
print(SHA256(hash_input ).hash )
if __name__ == "__main__":
main()
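# Sanity check for the SHA256 class above: the SHA-256 digest of the empty
# byte string is a well-known constant.
assert SHA256(b"").hash == (
    "e3b0c44298fc1c149afbf4c8996fb924" "27ae41e4649b934ca495991b7852b855"
)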
| 433
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE = {
'configuration_upernet': ['UperNetConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'UperNetForSemanticSegmentation',
'UperNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
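# The module above follows the transformers lazy-import convention. A much
# simpler, illustrative sketch of the underlying idea (PEP 562 module-level
# __getattr__, not the real _LazyModule implementation): the submodule is
# imported only when one of its names is first accessed.
import importlib

_LAZY_ATTRS = {"UperNetConfig": ".configuration_upernet"}  # name -> submodule

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")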
| 99
|
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    '''Return every way `target` can be built by concatenating words from `word_bank`.'''
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""]))
print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""]))
print(
all_construct(
"""hexagonosaurus""",
["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""],
)
)
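# A companion sketch (not from the original file): counting the combinations
# instead of materialising them, with memoisation over the remaining suffix.
from functools import lru_cache

def count_construct(target: str, word_bank: list[str]) -> int:
    @lru_cache(maxsize=None)
    def ways(suffix: str) -> int:
        if not suffix:
            return 1
        return sum(
            ways(suffix[len(word):]) for word in word_bank if suffix.startswith(word)
        )
    return ways(target)

assert count_construct("purple", ["purp", "p", "ur", "le", "purpl"]) == 2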
| 461
| 0
|
'''simple docstring'''
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
_A = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ = "dhaka" , SCREAMING_SNAKE_CASE_ = 5 ):
lowercase_ : Optional[int] = min(SCREAMING_SNAKE_CASE_ , 50 ) # Prevent abuse!
lowercase_ : Union[str, Any] = {
'q': query,
'tbm': 'isch',
'hl': 'en',
'ijn': '0',
}
lowercase_ : Union[str, Any] = requests.get('https://www.google.com/search' , params=SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ )
lowercase_ : Union[str, Any] = BeautifulSoup(html.text , 'html.parser' )
lowercase_ : List[Any] = ''.join(
re.findall(r'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) )
lowercase_ : Union[str, Any] = json.dumps(SCREAMING_SNAKE_CASE_ )
lowercase_ : List[str] = json.loads(SCREAMING_SNAKE_CASE_ )
lowercase_ : str = re.findall(
r'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , SCREAMING_SNAKE_CASE_ , )
if not matched_google_image_data:
return 0
lowercase_ : str = re.sub(
r'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(SCREAMING_SNAKE_CASE_ ) , )
lowercase_ : Union[str, Any] = re.findall(
r'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , SCREAMING_SNAKE_CASE_ , )
for index, fixed_full_res_image in enumerate(SCREAMING_SNAKE_CASE_ ):
if index >= max_images:
return index
lowercase_ : List[str] = bytes(SCREAMING_SNAKE_CASE_ , 'ascii' ).decode(
'unicode-escape' )
lowercase_ : Union[str, Any] = bytes(SCREAMING_SNAKE_CASE_ , 'ascii' ).decode(
'unicode-escape' )
lowercase_ : Any = urllib.request.build_opener()
lowercase_ : Any = [
(
'User-Agent',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
)
]
urllib.request.install_opener(SCREAMING_SNAKE_CASE_ )
lowercase_ : Dict = f'''query_{query.replace(" " , "_" )}'''
if not os.path.exists(SCREAMING_SNAKE_CASE_ ):
os.makedirs(SCREAMING_SNAKE_CASE_ )
urllib.request.urlretrieve( # noqa: S310
SCREAMING_SNAKE_CASE_ , f'''{path_name}/original_size_img_{index}.jpg''' )
return index
if __name__ == "__main__":
try:
_A = download_images_from_google_query(sys.argv[1])
print(F"""{image_count} images were downloaded to disk.""")
except IndexError:
print('Please provide a search term.')
raise
| 702
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json',
}
class UpperCAmelCase__ ( _snake_case ):
"""simple docstring"""
A : Any = '''mra'''
def __init__(self , _a=50_265 , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=1 , _a=0.02 , _a=1e-5 , _a="absolute" , _a=4 , _a="full" , _a=0 , _a=0 , _a=1 , _a=0 , _a=2 , **_a , ) -> int:
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
lowercase_ : Union[str, Any] = vocab_size
lowercase_ : List[str] = max_position_embeddings
lowercase_ : Optional[Any] = hidden_size
lowercase_ : Optional[int] = num_hidden_layers
lowercase_ : Dict = num_attention_heads
lowercase_ : Optional[int] = intermediate_size
lowercase_ : Dict = hidden_act
lowercase_ : str = hidden_dropout_prob
lowercase_ : int = attention_probs_dropout_prob
lowercase_ : str = initializer_range
lowercase_ : Union[str, Any] = type_vocab_size
lowercase_ : Any = layer_norm_eps
lowercase_ : Union[str, Any] = position_embedding_type
lowercase_ : Any = block_per_row
lowercase_ : Optional[int] = approx_mode
lowercase_ : int = initial_prior_first_n_blocks
lowercase_ : str = initial_prior_diagonal_n_blocks
| 438
| 0
|
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __lowerCAmelCase ( _A ,_A=() ,_A=None ,_A="no" ,_A="29500" ):
"""simple docstring"""
_lowercase = False
_lowercase = False
if any(key.startswith("""KAGGLE""" ) for key in os.environ.keys() ):
_lowercase = True
elif "IPython" in sys.modules:
_lowercase = """google.colab""" in str(sys.modules["""IPython"""].get_ipython() )
try:
_lowercase = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
f'''Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
if (in_colab or in_kaggle) and (os.environ.get("""TPU_NAME""" ,UpperCAmelCase_ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside """
"""your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if num_processes is None:
_lowercase = 8
_lowercase = PrepareForLaunch(UpperCAmelCase_ ,distributed_type="""TPU""" )
print(f'''Launching a training on {num_processes} TPU cores.''' )
xmp.spawn(UpperCAmelCase_ ,args=UpperCAmelCase_ ,nprocs=UpperCAmelCase_ ,start_method="""fork""" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on one CPU.""" )
function(*UpperCAmelCase_ )
else:
if num_processes is None:
raise ValueError(
"""You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.""" )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized """
"""inside your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if torch.cuda.is_initialized():
raise ValueError(
"""To launch a multi-GPU training from your notebook, you need to avoid running any instruction """
"""using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA """
"""function.""" )
# torch.distributed will expect a few environment variables to be here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=UpperCAmelCase_ ,master_addr="""127.0.0.1""" ,master_port=UpperCAmelCase_ ,mixed_precision=UpperCAmelCase_ ):
_lowercase = PrepareForLaunch(UpperCAmelCase_ ,distributed_type="""MULTI_GPU""" )
print(f'''Launching training on {num_processes} GPUs.''' )
try:
start_processes(UpperCAmelCase_ ,args=UpperCAmelCase_ ,nprocs=UpperCAmelCase_ ,start_method="""fork""" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"""CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. """
"""This likely stems from an outside import causing issues once the `notebook_launcher()` is called. """
"""Please review your imports and test them when running the `notebook_launcher()` to identify """
"""which one is problematic.""" ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
_lowercase = """1"""
print("""Launching training on MPS.""" )
elif torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on CPU.""" )
function(*UpperCAmelCase_ )
def __lowerCAmelCase ( _A ,_A=() ,_A=2 ):
"""simple docstring"""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variables to be here. We set the ones common to each
# process here (the other ones will be set by the launcher).
with patch_environment(
world_size=UpperCAmelCase_ ,master_addr="""127.0.0.1""" ,master_port="""29500""" ,accelerate_mixed_precision="""no""" ,accelerate_debug_rdv_file=tmp_file.name ,accelerate_use_cpu="""yes""" ,):
_lowercase = PrepareForLaunch(UpperCAmelCase_ ,debug=UpperCAmelCase_ )
start_processes(UpperCAmelCase_ ,args=UpperCAmelCase_ ,nprocs=UpperCAmelCase_ ,start_method="""fork""" )
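# Hedged usage sketch: this file implements accelerate's notebook_launcher, so
# the public entry point below matches the function above. num_processes=2
# assumes a machine with at least two GPUs; on CPU-only or single-GPU setups
# the fallback branches above apply instead.
from accelerate import notebook_launcher

def training_loop():
    print("hello from one process")

notebook_launcher(training_loop, args=(), num_processes=2)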
| 398
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ : str = logging.get_logger(__name__)
a__ : Any = {
'''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''',
'''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''',
'''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''',
'''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''',
'''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''',
'''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''',
'''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''',
'''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''',
}
class __snake_case ( __magic_name__ ):
__lowerCAmelCase = '''xlm'''
__lowerCAmelCase = {
'''hidden_size''': '''emb_dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
'''n_words''': '''vocab_size''', # For backward compatibility
}
def __init__( self , UpperCamelCase_=3_0145 , UpperCamelCase_=2048 , UpperCamelCase_=12 , UpperCamelCase_=16 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=1 , UpperCamelCase_=True , UpperCamelCase_=512 , UpperCamelCase_=2048**-0.5 , UpperCamelCase_=1E-1_2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=0 , UpperCamelCase_=1 , UpperCamelCase_=2 , UpperCamelCase_=3 , UpperCamelCase_=5 , UpperCamelCase_=True , UpperCamelCase_="first" , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=0.1 , UpperCamelCase_=5 , UpperCamelCase_=5 , UpperCamelCase_=0 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_=0 , **UpperCamelCase_ , ) -> List[str]:
snake_case__ = vocab_size
snake_case__ = emb_dim
snake_case__ = n_layers
snake_case__ = n_heads
snake_case__ = dropout
snake_case__ = attention_dropout
snake_case__ = gelu_activation
snake_case__ = sinusoidal_embeddings
snake_case__ = causal
snake_case__ = asm
snake_case__ = n_langs
snake_case__ = use_lang_emb
snake_case__ = layer_norm_eps
snake_case__ = bos_index
snake_case__ = eos_index
snake_case__ = pad_index
snake_case__ = unk_index
snake_case__ = mask_index
snake_case__ = is_encoder
snake_case__ = max_position_embeddings
snake_case__ = embed_init_std
snake_case__ = init_std
snake_case__ = summary_type
snake_case__ = summary_use_proj
snake_case__ = summary_activation
snake_case__ = summary_proj_to_labels
snake_case__ = summary_first_dropout
snake_case__ = start_n_top
snake_case__ = end_n_top
snake_case__ = mask_token_id
snake_case__ = lang_id
if "n_words" in kwargs:
snake_case__ = kwargs['n_words']
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
class __snake_case ( __magic_name__ ):
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
snake_case__ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
snake_case__ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
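# Quick demonstration of the attribute_map aliasing declared above: the
# generic configuration names resolve to XLM's native attribute names
# (assumes the standard transformers package).
from transformers import XLMConfig

config = XLMConfig(emb_dim=1024, n_layers=6, n_heads=8)
print(config.hidden_size)        # 1024, aliased to emb_dim
print(config.num_hidden_layers)  # 6, aliased to n_layers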
| 368
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int
class AdjacencyList:
    def __init__(self, size: int) -> None:
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size
    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))
    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    # 0-weight edges go to the front of the deque
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
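# Usage sketch for the 0-1 BFS graph above: weight-0 edges are pushed to the
# front of the deque and weight-1 edges to the back, which yields shortest
# paths in O(V + E) without a priority queue.
g = AdjacencyList(3)
g.add_edge(0, 1, 0)
g.add_edge(0, 2, 1)
g.add_edge(1, 2, 1)
assert g.get_shortest_path(0, 2) == 1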
| 717
|
"""simple docstring"""
import base64


def base85_encode(string: str) -> bytes:
    '''Encode a UTF-8 string with Ascii85 (base85).'''
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    '''Decode Ascii85 bytes back into a UTF-8 string.'''
    return base64.a85decode(a85encoded).decode("utf-8")
if __name__ == "__main__":
import doctest
doctest.testmod()
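# Round-trip check for the Ascii85 helpers above.
assert base85_decode(base85_encode("some text to encode")) == "some text to encode"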
| 31
| 0
|
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
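

if __name__ == "__main__":
    # Minimal sketch (not part of the test suite): round-trip a small Dataset
    # through in-memory JSON lines; the column values here are made up.
    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [0.5, 1.5]})
    with io.BytesIO() as buf:
        JsonDatasetWriter(ds, buf, lines=True).write()
        buf.seek(0)
        print(load_json_lines(buf))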
| 6
|
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 563
| 0
|
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no HuggingFace equivalent."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build a bias-free linear layer that shares weights with an embedding."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
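
    # Illustrative sanity check (an assumption, not part of the original
    # script): the dumped weights should load back with the saved config and
    # keep the shared embedding shape intact.
    reloaded = MBartForConditionalGeneration.from_pretrained(args.pytorch_dump_folder_path)
    assert reloaded.model.shared.weight.shape == model.model.shared.weight.shape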
| 707
|
def logical_left_shift(number: int, shift_amount: int) -> str:
    """
    Shift the binary representation of a positive integer left, filling with zeros.
    >>> logical_left_shift(5, 3)
    '0b101000'
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """
    Shift the binary representation of a positive integer right, dropping the shifted-out bits.
    >>> logical_right_shift(11, 2)
    '0b10'
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """
    Shift the two's-complement binary representation of an integer right, replicating the sign bit.
    >>> arithmetic_right_shift(-17, 2)
    '0b111011'
    """
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )
    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
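
    # Illustrative usage (not in the original file): compare with Python's
    # built-in shift operators on small values.
    print(logical_left_shift(5, 3))        # '0b101000' == bin(5 << 3)
    print(logical_right_shift(11, 2))      # '0b10' == bin(11 >> 2)
    print(arithmetic_right_shift(-17, 2))  # '0b111011', i.e. -5 in 6-bit two's complement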
| 391
| 0
|
"""Project Euler problem 115: https://projecteuler.net/problem=115"""
from itertools import count


def solution(min_block_length: int = 50) -> int:
    """
    Return the least row length n for which the fill-count function first
    exceeds one million, given the minimum block length.
    """
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n


if __name__ == "__main__":
    print(f"{solution() = }")
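    # Illustrative extra call (an assumption, not in the original solution):
    # smaller minimum block lengths cross the million-ways threshold sooner.
    print(f"{solution(min_block_length=10) = }")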
| 405
|
"""Project Euler problem 55 (Lychrel numbers): https://projecteuler.net/problem=55"""


def is_palindrome(n: int) -> bool:
    """Return True if n reads the same forwards and backwards."""
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    """Return n plus the number formed by reversing n's digits."""
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10_000) -> int:
    """
    Count candidate Lychrel numbers below the limit, i.e. numbers that do not
    produce a palindrome within 50 reverse-and-add iterations.
    """
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            num = sum_reverse(num)
            iterations += 1
            if is_palindrome(num):
                break
        else:
            lychrel_nums.append(a)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
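    # Illustrative check (not in the original solution): 196 is the most famous
    # Lychrel candidate, so one reverse-and-add step must not yield a palindrome.
    assert not is_palindrome(sum_reverse(196))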
| 405
| 1
|
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Greedy fractional knapsack: repeatedly take the item with the best
    profit/weight ratio until the weight limit is reached."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]
    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)
    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0
    # loop till the total weight does not reach max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1
        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function Call
    calc_profit(profit, weight, max_weight)
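
    # Illustrative non-interactive example (an assumption, not in the original
    # script): with capacity 15 and these made-up items, the greedy choice takes
    # the best-ratio items whole, then a fraction of the next one.
    print(calc_profit([10, 9, 8], [5, 7, 4], 15))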
| 711
|
"""Hyperbolic tangent activation function."""
import numpy as np


def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Implement tanh via its closed form, tanh(x) = 2 / (1 + e^(-2x)) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
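
    # Illustrative check (not in the original file): the closed form above
    # matches numpy's own tanh.
    sample = np.array([-1.0, 0.0, 1.0])
    assert np.allclose(tangent_hyperbolic(sample), np.tanh(sample))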
| 572
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents
        # would always return 0 - set clip_std to 1 so it doesn't.
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
| 368
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=7,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=8,
            use_linear_projection=True,
            only_cross_attention=(True, True, False),
            num_class_embeds=100,
        )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        return CLIPTextModel(config)
def _snake_case ( self ) -> Union[str, Any]:
snake_case__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case__ = self.dummy_cond_unet_upscale
snake_case__ = DDPMScheduler()
snake_case__ = DDIMScheduler(prediction_type='v_prediction' )
snake_case__ = self.dummy_vae
snake_case__ = self.dummy_text_encoder
snake_case__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
# make sure here that pndm scheduler skips prk
snake_case__ = StableDiffusionUpscalePipeline(
unet=UpperCamelCase_ , low_res_scheduler=UpperCamelCase_ , scheduler=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , max_noise_level=350 , )
snake_case__ = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
snake_case__ = 'A painting of a squirrel eating a burger'
snake_case__ = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
snake_case__ = sd_pipe(
[prompt] , image=UpperCamelCase_ , generator=UpperCamelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
snake_case__ = output.images
snake_case__ = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
snake_case__ = sd_pipe(
[prompt] , image=UpperCamelCase_ , generator=UpperCamelCase_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , return_dict=UpperCamelCase_ , )[0]
snake_case__ = image[0, -3:, -3:, -1]
snake_case__ = image_from_tuple[0, -3:, -3:, -1]
snake_case__ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
snake_case__ = np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> List[str]:
snake_case__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case__ = self.dummy_cond_unet_upscale
snake_case__ = DDPMScheduler()
snake_case__ = DDIMScheduler(prediction_type='v_prediction' )
snake_case__ = self.dummy_vae
snake_case__ = self.dummy_text_encoder
snake_case__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
# make sure here that pndm scheduler skips prk
snake_case__ = StableDiffusionUpscalePipeline(
unet=UpperCamelCase_ , low_res_scheduler=UpperCamelCase_ , scheduler=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , max_noise_level=350 , )
snake_case__ = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
snake_case__ = 'A painting of a squirrel eating a burger'
snake_case__ = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
snake_case__ = output.images
assert image.shape[0] == 2
snake_case__ = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
snake_case__ = sd_pipe(
[prompt] , image=UpperCamelCase_ , generator=UpperCamelCase_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
snake_case__ = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def _snake_case ( self ) -> str:
snake_case__ = self.dummy_cond_unet_upscale
snake_case__ = DDPMScheduler()
snake_case__ = DDIMScheduler(prediction_type='v_prediction' )
snake_case__ = self.dummy_vae
snake_case__ = self.dummy_text_encoder
snake_case__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
# put models in fp16, except vae as it overflows in fp16
snake_case__ = unet.half()
snake_case__ = text_encoder.half()
# make sure here that pndm scheduler skips prk
snake_case__ = StableDiffusionUpscalePipeline(
unet=UpperCamelCase_ , low_res_scheduler=UpperCamelCase_ , scheduler=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , max_noise_level=350 , )
snake_case__ = sd_pipe.to(UpperCamelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
snake_case__ = 'A painting of a squirrel eating a burger'
snake_case__ = torch.manual_seed(0 )
snake_case__ = sd_pipe(
[prompt] , image=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=2 , output_type='np' , ).images
snake_case__ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ) -> Optional[int]:
snake_case__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
snake_case__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat.npy' )
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
snake_case__ = 'a cat sitting on a park bench'
snake_case__ = torch.manual_seed(0 )
snake_case__ = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type='np' , )
snake_case__ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def _snake_case ( self ) -> List[Any]:
snake_case__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
snake_case__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat_fp16.npy' )
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
snake_case__ = 'a cat sitting on a park bench'
snake_case__ = torch.manual_seed(0 )
snake_case__ = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type='np' , )
snake_case__ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def _snake_case ( self ) -> int:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case__ = 'a cat sitting on a park bench'
snake_case__ = torch.manual_seed(0 )
snake_case__ = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=5 , output_type='np' , )
snake_case__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 368
| 1
|
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42


@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(dataset_info, tmp_path):
    tmp_dir = str(tmp_path)
    dataset_info.write_to_directory(tmp_dir)
    reloaded = DatasetInfo.from_directory(tmp_dir)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_dir, "dataset_info.json"))


def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}


@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict, tmp_path):
    tmp_dir = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_dir)
    reloaded = DatasetInfosDict.from_directory(tmp_dir)
    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_dir, "README.md"))
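

if __name__ == "__main__":
    # Minimal sketch (not part of the test suite): a DatasetInfo's size field
    # survives the YAML round-trip used for dataset cards.
    info = DatasetInfo(description="demo", dataset_size=42)
    assert DatasetInfo._from_yaml_dict(info._to_yaml_dict()).dataset_size == 42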
| 709
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
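

if __name__ == "__main__":
    # Illustrative usage sketch (an assumption, not part of the module): calling
    # the tool downloads the CIDAS/clipseg-rd64-refined checkpoint on first use;
    # the blank input image is a stand-in for a real photo.
    segmenter = ImageSegmentationTool()
    mask = segmenter(Image.new("RGB", (352, 352)), "cat")
    print(mask.size)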
| 674
| 0
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 220
|
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250_002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3_072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
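

if __name__ == "__main__":
    # Illustrative usage (not part of the module): instantiate the config with
    # its defaults and inspect a few fields.
    config = ErnieMConfig()
    print(config.hidden_size, config.num_attention_heads, config.max_position_embeddings)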
| 220
| 1
|
"""Borůvka's algorithm for finding a minimum spanning tree of a weighted graph."""


class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Add a vertex to the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Add an undirected, weighted edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Make all edge weights distinct, as required by Borůvka's algorithm."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Return all edges as (tail, head, weight) tuples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Return all vertices in the graph."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Build a graph from the given vertices and (head, tail, weight) edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set structure with path compression and union by rank."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Return the minimum spanning tree found by Borůvka's algorithm.

        Each round, every component picks its cheapest outgoing edge and the
        components are merged along those edges, so the number of components
        at least halves per round.
        """
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
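

if __name__ == "__main__":
    # Illustrative usage (not in the original module): a 4-vertex cycle whose
    # MST drops the heaviest edge; the weights are made up.
    g = Graph.build(vertices=[0, 1, 2, 3], edges=[(0, 1, 1), (1, 2, 2), (2, 3, 3), (3, 0, 4)])
    g.distinct_weight()
    print(Graph.boruvka_mst(g))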
| 704
|
"""Koch snowflake fractal, drawn with matplotlib."""
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the iteration step `steps` times to the initial vectors."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace each straight segment by four segments forming a Koch 'spike'."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counter-clockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """Plot the snowflake as a connected line."""
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
| 466
| 0
|
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
_DESCRIPTION = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
| 619
|
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCamelCase__ = re.compile(r'''\s+''')
def lowerCamelCase__ ( __A :int ):
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(__A ,"""""" ,example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}
def lowerCamelCase__ ( __A :Any ):
"""simple docstring"""
__snake_case = [len(__A ) for line in example["""content"""].splitlines()]
return {"line_mean": np.mean(__A ), "line_max": max(__A )}
def lowerCamelCase__ ( __A :Any ):
"""simple docstring"""
__snake_case = np.mean([c.isalnum() for c in example["""content"""]] )
return {"alpha_frac": alpha_frac}
def lowerCamelCase__ ( __A :str ,__A :int ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["""hash"""] )
return True
else:
return False
def is_autogenerated(example, scan_width=5):
    """simple docstring"""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """simple docstring"""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
for line in lines:
count_config += line.lower().count("""config""" )
count_test += line.lower().count("""test""" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def has_no_keywords(example):
    """simple docstring"""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def has_few_assignments(example, minimum=4):
    """simple docstring"""
    lines = example["content"].splitlines()
    counter = 0
for line in lines:
counter += line.lower().count("""=""" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def char_token_ratio(example):
    """simple docstring"""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
return {"ratio": ratio}
def preprocess(example):
    """simple docstring"""
    results = {}
results.update(get_hash(__A ) )
results.update(line_stats(__A ) )
results.update(alpha_stats(__A ) )
results.update(char_token_ratio(__A ) )
results.update(is_autogenerated(__A ) )
results.update(is_config_or_test(__A ) )
results.update(has_no_keywords(__A ) )
results.update(has_few_assignments(__A ) )
return results
def filter(example, uniques, args):
    """simple docstring"""
    if not check_uniques(example, uniques):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file(file_path):
    """simple docstring"""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")
# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicated dataset: {len(ds_filter)}")
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)
data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
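# A small self-contained sketch of the exact-deduplication idea used above:
# hash the whitespace-stripped content and keep only the first occurrence of
# each hash. The toy documents here are illustrative only.
def _dedup_demo():
    docs = ["def f():\n    return 1", "def f():\n\treturn 1", "print('hi')"]
    seen, kept = set(), []
    for doc in docs:
        h = hashlib.md5(re.sub(r"\s+", "", doc).encode("utf-8")).hexdigest()
        if h not in seen:
            seen.add(h)
            kept.append(doc)
    # the two whitespace variants of f() collapse into a single document
    assert len(kept) == 2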
| 268
| 0
|
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)
    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        model_state_dict[key] = value
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")
    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23], )
        config = SamConfig(
            vision_config=vision_config, )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31], )
        config = SamConfig(
            vision_config=vision_config, )
    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)
    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")
    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]
    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604
        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514
        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
'--model_name',
default='sam_vit_h_4b8939',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
parser.add_argument(
'--model_hub_id',
default='ybelkada/segment-anything',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
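# A toy sketch of the key-renaming loop inside `replace_keys` above; the mapping
# and state-dict keys here are illustrative, not real SAM checkpoint names.
def _rename_demo():
    toy_mapping = {"image_encoder": "vision_encoder", "patch_embed.proj": "patch_embed.projection"}
    toy_state = {"image_encoder.patch_embed.proj.weight": 0}
    renamed = {}
    for key, value in toy_state.items():
        for old, new in toy_mapping.items():
            if old in key:
                key = key.replace(old, new)
        renamed[key] = value
    assert "vision_encoder.patch_embed.projection.weight" in renamed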
| 703
|
'''simple docstring'''
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
return data_lists
def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
return score_lists
def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
return final_scores
def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
return source_data
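if __name__ == "__main__":
    # Usage sketch: weight 0 rewards low values, weight 1 rewards high values,
    # and one aggregate score is appended to every row. Toy data only.
    vehicles = [[20, 60], [80, 90]]
    print(procentual_proximity(vehicles, [0, 1]))  # e.g. [[20, 60, 1.0], [80, 90, 1.0]]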
| 350
| 0
|
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest( SchedulerCommonTest ):
    '''simple docstring'''
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config( self , **kwargs ):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs )
        return config
    def check_over_configs( self , time_step=0 , **config ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained( self ):
        pass
    def check_over_forward( self , time_step=0 , **forward_kwargs ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop( self , **config ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.prk_timesteps ):
            residual = model(sample , t )
            sample = scheduler.step_prk(residual , t , sample ).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps ):
            residual = model(sample , t )
            sample = scheduler.step_plms(residual , t , sample ).prev_sample
        return sample
    def test_step_shape( self ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , "set_timesteps" ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , "set_timesteps" ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            output_0 = scheduler.step_prk(residual , 0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step_prk(residual , 1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
            output_0 = scheduler.step_plms(residual , 0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step_plms(residual , 1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
    def test_timesteps( self ):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_steps_offset( self ):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1 )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(10 )
        assert torch.equal(
            scheduler.timesteps , torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
    def test_betas( self ):
        for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules( self ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_time_indices( self ):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t )
    def test_inference_steps( self ):
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=num_inference_steps )
    def test_pow_of_3_inference_steps( self ):
        num_inference_steps = 27
        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2] ):
                sample = scheduler.step_prk(residual , t , sample ).prev_sample
    def test_inference_plms_no_past_residuals( self ):
        with self.assertRaises(ValueError ):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
    def test_full_loop_no_noise( self ):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 198.1318 ) < 1e-2
        assert abs(result_mean.item() - 0.2580 ) < 1e-3
    def test_full_loop_with_v_prediction( self ):
        sample = self.full_loop(prediction_type="v_prediction" )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 67.3986 ) < 1e-2
        assert abs(result_mean.item() - 0.0878 ) < 1e-3
    def test_full_loop_with_set_alpha_to_one( self ):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 230.0399 ) < 1e-2
        assert abs(result_mean.item() - 0.2995 ) < 1e-3
    def test_full_loop_with_no_set_alpha_to_one( self ):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 186.9482 ) < 1e-2
        assert abs(result_mean.item() - 0.2434 ) < 1e-3
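# A minimal sketch of the save/reload round trip the checks above rely on: a
# scheduler restored with `from_pretrained` must be configured identically.
if __name__ == "__main__":
    with tempfile.TemporaryDirectory() as tmp:
        original = PNDMScheduler(num_train_timesteps=1000)
        original.save_config(tmp)
        restored = PNDMScheduler.from_pretrained(tmp)
    assert original.config.num_train_timesteps == restored.config.num_train_timesteps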
| 214
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest( unittest.TestCase ):
    def setUp( self ):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        image_processor_map = {
'''do_resize''': True,
'''size''': {'''height''': 2_2_4, '''width''': 2_2_4},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 1_8, '''width''': 1_8},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
        self.image_processor_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self , **kwargs ):
        '''simple docstring'''
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        '''simple docstring'''
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default( self ):
        '''simple docstring'''
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , BertTokenizer )
        self.assertIsInstance(processor_fast.tokenizer , BertTokenizerFast )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , ChineseCLIPImageProcessor )
        self.assertIsInstance(processor_fast.image_processor , ChineseCLIPImageProcessor )
    def test_save_load_pretrained_additional_features( self ):
        '''simple docstring'''
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False )
        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=False )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , BertTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , ChineseCLIPImageProcessor )
    def test_image_processor( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors='''np''' )
        input_processor = processor(images=image_input , return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''Alexandra,T-shirt的价格是15便士。'''
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''Alexandra,T-shirt的价格是15便士。'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''Alexandra,T-shirt的价格是15便士。'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
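# Note on the contract pinned down above (sketch, not executed here): the
# processor only routes inputs, sending `text=` to the tokenizer and `images=`
# to the image processor and merging the two output dicts, e.g.
#   inputs = processor(text="一段文字", images=image)
#   # -> input_ids, token_type_ids, attention_mask, pixel_values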
| 141
| 0
|
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl",
):
    """simple docstring"""
    set_seed(3 )
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len , data_file , number=size_objective_set , min_len=1026 , trim=trim )
    # keeps model same across runs
    set_seed(4 )
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
    # load pretrained model
    model = load_gpt2("gpt2" ).to(device )
    print("computing perplexity on objective set" )
    orig_perp = compute_perplexity(model , objective_set , context_len ).item()
    print("perplexity on objective set:" , orig_perp )
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model , orig_perp , context_len , train_data , objective_set , max_steps , device , igf_data_file )
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128,
    eval_freq=100, igf_model_path="igf_model.pt",
):
    """simple docstring"""
    set_seed(42 )
    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2" )
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model )
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner , secondary_learner_train_data , max_epochs=secondary_learner_max_epochs , batch_size=secondary_learner_batch_size , eval_freq=100 , igf_model_path=igf_model_path , )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune(
    model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0,
    recopy_model=recopy_gpt2, secondary_learner=None, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt",
):
    """simple docstring"""
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
    train_sampler = RandomSampler(train_dataset )
    train_dataloader = DataLoader(train_dataset , sampler=train_sampler )
    num_train_epochs = max_steps // (len(train_dataset )) + 1
    global_step = 0
    context = torch.zeros((1, context_len) , dtype=torch.long , device=device )
    model, lm_optimizer, lm_scheduler = recopy_model(model , device , max_steps )
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device )
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model , test_dataset , context_len )
    test_perps.append(real_perp )
    print("Test perplexity, step" , global_step , ":" , real_perp )
    for epoch in range(int(num_train_epochs ) ):
        for step, example in enumerate(train_dataloader ):
            torch.cuda.empty_cache()
            start = random.randint(0 , example.size(2 ) - context_len - 1 )
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context , labels=context )
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context , dtype=torch.long , device=device ).unsqueeze(0 ) )[0].item()
                observed_qs.append(float(predicted_q ) )
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu() ) )
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model , test_dataset , context_len )
                    test_perps.append(real_perp )
                    print("Test perplexity, step" , global_step , ":" , real_perp )
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict() , finetuned_model_name )
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" )
    # Required parameters
    parser.add_argument(
        "--data_dir" , default=None , type=str , required=False , help="The input data dir. Should contain data files for WikiText." , )
    parser.add_argument(
        "--model_name_or_path" , default=None , type=str , required=False , help="Path to pretrained model or model identifier from huggingface.co/models" , )
    parser.add_argument(
        "--data_file" , type=str , default=None , help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ) , )
    parser.add_argument(
        "--igf_data_file" , type=str , default=None , help="A jbl file containing the context and information gain pairs to train secondary learner." , )
    parser.add_argument(
        "--output_dir" , default=None , type=str , required=False , help="The output directory where the final fine-tuned model is stored." , )
    parser.add_argument(
        "--tokenizer_name" , default=None , type=str , help="Pretrained tokenizer name or path if not the same as model_name" , )
    parser.add_argument("--seed" , type=int , default=None , help="A seed for reproducible training." )
    parser.add_argument(
        "--context_len" , default=32 , type=int , help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ) , )
    parser.add_argument(
        "--size_objective_set" , default=100 , type=int , help="number of articles that are long enough to be used as our objective set" , )
    parser.add_argument(
        "--eval_freq" , default=100 , type=int , help="secondary model evaluation is triggered at eval_freq" )
    parser.add_argument("--max_steps" , default=1000 , type=int , help="To calculate training epochs" )
    parser.add_argument(
        "--secondary_learner_batch_size" , default=128 , type=int , help="batch size of training data for secondary learner" , )
    parser.add_argument(
        "--batch_size" , default=16 , type=int , help="batch size of training data of language model(gpt2) " )
    parser.add_argument(
        "--eval_interval" , default=10 , type=int , help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ) , )
    parser.add_argument(
        "--number" , default=100 , type=int , help="The number of examples split to be used as objective_set/test_data" )
    parser.add_argument(
        "--min_len" , default=1026 , type=int , help="The minimum length of the article to be used as objective set" )
    parser.add_argument(
        "--secondary_learner_max_epochs" , default=15 , type=int , help="number of epochs to train secondary learner" )
    parser.add_argument("--trim" , default=True , type=bool , help="truncate the example if it exceeds context length" )
    parser.add_argument(
        "--threshold" , default=1.0 , type=float , help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ) , )
    parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=str , help="finetuned_model_name" )
    parser.add_argument(
        "--recopy_model" , default=recopy_gpt2 , type=str , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , )
    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=True , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , )
    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl" )
    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , )
    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2" )
    set_seed(42 )
    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=100 , min_len=1026 , trim=True )
    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model , train_dataset , test_dataset , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=recopy_gpt2 , secondary_learner=secondary_learner , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , )
if __name__ == "__main__":
    main()
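# A toy sketch (hypothetical helper, not part of the script) of the filtering
# rule used inside `finetune` above: an example is backpropagated only when the
# secondary learner's predicted information gain clears the threshold, which is
# dropped to -1 after 10 global steps.
def _keep_example(predicted_q, global_step, threshold=1.0):
    if global_step >= 10:
        threshold = -1
    return predicted_q >= threshold
assert not _keep_example(0.5, 0)
assert _keep_example(0.5, 12)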
| 717
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig( PretrainedConfig ):
    model_type = "poolformer"
    def __init__(self, num_channels=3, patch_size=16, stride=16, pool_size=3, mlp_ratio=4.0, depths=[2, 2, 6, 2], hidden_sizes=[64, 128, 320, 512], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], padding=[2, 1, 1, 1], num_encoder_blocks=4, drop_path_rate=0.0, hidden_act="gelu", use_layer_scale=True, layer_scale_init_value=1e-5, initializer_range=0.02, **kwargs, ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs )
class PoolFormerOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
    def atol_for_validation(self ) -> float:
return 2E-3
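# A usage sketch (illustrative): the defaults above fully specify a config, and
# the ONNX config exposes the dynamic input axes for export.
if __name__ == "__main__":
    cfg = PoolFormerConfig()
    print(cfg.model_type, cfg.hidden_sizes)
    print(dict(PoolFormerOnnxConfig(cfg).inputs))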
| 294
| 0
|
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy))
def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
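    # Sanity check (illustrative): after range reduction the truncated series
    # should agree with the math module to high precision.
    from math import cos, sin
    assert abs(maclaurin_sin(10) - sin(10)) < 1e-9
    assert abs(maclaurin_cos(10) - cos(10)) < 1e-9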
| 233
|
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    '''simple docstring'''
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)
    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
__A : int = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
| 130
| 0
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline( DiffusionPipeline ):
    def __init__( self ,unet ,scheduler ) -> Tuple:
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=unet ,scheduler=scheduler )
@torch.no_grad()
    def __call__( self ,batch_size = 1 ,generator = None ,eta = 0.0 ,num_inference_steps = 50 ,use_clipped_model_output = None ,output_type = "pil" ,return_dict = True ,) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size ,int ):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(generator ,list ) and len(generator ) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        image = randn_tensor(image_shape ,generator=generator ,device=self.device ,dtype=self.unet.dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image ,t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output ,t ,image ,eta=eta ,use_clipped_model_output=use_clipped_model_output ,generator=generator ).prev_sample
        image = (image / 2 + 0.5).clamp(0 ,1 )
        image = image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
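# A usage sketch (assumes the Hub checkpoint "google/ddpm-cat-256" is available
# and `diffusers` can download it; generation is slow, so this stays commented):
# pipe = DDIMPipeline.from_pretrained("google/ddpm-cat-256")
# image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
# image.save("ddim_sample.png")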
| 536
|
"""simple docstring"""
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bs4_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester( unittest.TestCase ):
    def __init__( self ,parent ) -> str:
        self.parent = parent
    def prepare_feat_extract_dict( self ) -> int:
        return {}
def get_html_strings():
    """simple docstring"""
    html_string_1 = '<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR="FFFFFF">\n <HR>\n <a href="http://google.com">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style="color:#0000FF">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>'
    html_string_2 = '\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n '
    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest( FeatureExtractionSavingTestMixin , unittest.TestCase ):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None
    def setUp( self ) -> Dict:
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self )
    @property
    def feat_extract_dict( self ) -> Optional[Any]:
        return self.feature_extract_tester.prepare_feat_extract_dict()
    def test_call( self ) -> Any:
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()
        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string )
        # fmt: off
        expected_nodes = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
        expected_xpaths = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
        # fmt: on
        self.assertEqual(encoding.nodes ,expected_nodes )
        self.assertEqual(encoding.xpaths ,expected_xpaths )
        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings )
        # fmt: off
        expected_nodes = expected_nodes + [['My First Heading', 'My first paragraph.']]
        expected_xpaths = expected_xpaths + [['/html/body/h1', '/html/body/p']]
        self.assertEqual(len(encoding.nodes ) ,2 )
        self.assertEqual(len(encoding.xpaths ) ,2 )
        self.assertEqual(encoding.nodes ,expected_nodes )
        self.assertEqual(encoding.xpaths ,expected_xpaths )
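# A minimal sketch of what the feature extractor consumes and produces, per the
# expectations above: raw HTML in, text nodes plus their xpaths out (needs bs4).
# feature_extractor = MarkupLMFeatureExtractor()
# encoding = feature_extractor("<html><body><h1>Hello</h1></body></html>")
# encoding.nodes  -> [["Hello"]]
# encoding.xpaths -> [["/html/body/h1"]]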
| 536
| 1
|
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        """simple docstring"""
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config( self ):
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict( self , config ):
        """simple docstring"""
        _past_length = config.context_length + max(config.lags_sequence )
        static_categorical_features = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
        past_values = floats_tensor([self.batch_size, _past_length] )
        past_observed_mask = floats_tensor([self.batch_size, _past_length] ) > 0.5
        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
        future_values = floats_tensor([self.batch_size, config.prediction_length] )
        inputs_dict = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config )
        return config, inputs_dict
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone( self , config , inputs_dict ):
        """simple docstring"""
        model = AutoformerModel(config=config ).to(torch_device ).eval()
        outputs = model(**inputs_dict )
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname )
            encoder = AutoformerEncoder.from_pretrained(tmpdirname ).to(torch_device )
        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict )
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input )[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3 )
        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
            .unsqueeze(1 )
            .repeat(1 , config.prediction_length , 1 )
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname )
            decoder = AutoformerDecoder.from_pretrained(tmpdirname ).to(torch_device )
        last_hidden_state_2 = decoder(
            trend=trend_init , inputs_embeds=dec_input , encoder_hidden_states=encoder_last_hidden_state , )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class AutoformerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = AutoformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AutoformerConfig , has_text_modality=False )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_save_load_strict( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname )
                model, info = model_class.from_pretrained(tmpdirname , output_loading_info=True )
            self.assertEqual(info["missing_keys"] , [] )
    def test_encoder_decoder_model_standalone( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs )
    @unittest.skip(reason="Model has no tokens embeddings" )
    def test_resize_tokens_embeddings( self ):
        """simple docstring"""
        pass
    def test_model_main_input_name( self ):
        """simple docstring"""
        model_signature = inspect.signature(getattr(AutoformerModel , "forward" ) )
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys() )[1]
        self.assertEqual(AutoformerModel.main_input_name , observed_main_input_name )
    def test_forward_signature( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]
            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask" )
            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ] )
            self.assertListEqual(arg_names[: len(expected_arg_names )] , expected_arg_names )
def lowercase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Any = True
__snake_case : Union[str, Any] = getattr(self.model_tester , """seq_length""" , __magic_name__ )
__snake_case : str = getattr(self.model_tester , """decoder_seq_length""" , __magic_name__ )
__snake_case : Tuple = getattr(self.model_tester , """encoder_seq_length""" , __magic_name__ )
__snake_case : Tuple = getattr(self.model_tester , """d_model""" , __magic_name__ )
__snake_case : str = getattr(self.model_tester , """num_attention_heads""" , __magic_name__ )
__snake_case : Optional[int] = d_model // num_attention_heads
for model_class in self.all_model_classes:
__snake_case : Optional[int] = True
__snake_case : List[Any] = False
__snake_case : Optional[Any] = True
__snake_case : List[str] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Dict = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : List[str] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__snake_case : str = True
__snake_case : Optional[Any] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : str = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : List[Any] = outputs.encoder_attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
__snake_case : Union[str, Any] = len(__magic_name__ )
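# Baseline number of outputs from a default Autoformer forward pass; every optional entry detected below adds one more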
__snake_case : Dict = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(__magic_name__ , __magic_name__ )
# decoder attentions
__snake_case : Union[str, Any] = outputs.decoder_attentions
self.assertIsInstance(__magic_name__ , (list, tuple) )
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
__snake_case : str = outputs.cross_attentions
self.assertIsInstance(__magic_name__ , (list, tuple) )
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
__snake_case : str = True
__snake_case : int = True
__snake_case : Any = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Optional[int] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
self.assertEqual(out_len + 2 , len(__magic_name__ ) )
__snake_case : List[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def lowercase__ ( self : List[str] ) -> int:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
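# Helper for the integration tests below: downloads a pickled batch of the tourism-monthly dataset from the Hub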
def _a ( _lowerCamelCase="train-batch.pt" ) -> Dict:
"""simple docstring"""
__snake_case : Tuple = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=_lowerCamelCase , repo_type="""dataset""" )
__snake_case : Union[str, Any] = torch.load(_lowerCamelCase , map_location=_lowerCamelCase )
return batch
@require_torch
@slow
class _A ( unittest.TestCase ):
def lowercase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__snake_case : int = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__magic_name__ )
__snake_case : str = prepare_batch()
with torch.no_grad():
__snake_case : Any = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
__snake_case : List[str] = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , __magic_name__ )
__snake_case : Any = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=__magic_name__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , __magic_name__ , atol=1e-4 ) )
def lowercase__ ( self : str ) -> List[Any]:
"""simple docstring"""
__snake_case : Any = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__magic_name__ )
__snake_case : Optional[Any] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__snake_case : Optional[int] = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
__snake_case : Optional[int] = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , __magic_name__ )
__snake_case : Tuple = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=__magic_name__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , __magic_name__ , atol=1e-4 ) )
def lowercase__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__snake_case : Any = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__magic_name__ )
__snake_case : Tuple = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
__snake_case : Any = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
__snake_case : List[Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , __magic_name__ )
__snake_case : int = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=__magic_name__ )
__snake_case : List[Any] = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __magic_name__ , rtol=1E-1 ) )
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def __a ( self ):
_lowercase : List[Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
_lowercase : List[Any] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
model.to(_lowerCAmelCase )
from datasets import load_dataset
_lowercase : Union[str, Any] = load_dataset('nielsr/rvlcdip-demo' )
_lowercase : Any = dataset['train'][0]['image'].convert('RGB' )
_lowercase : List[str] = image_processor(_lowerCAmelCase , return_tensors='pt' ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowercase : Dict = model(**_lowerCAmelCase )
_lowercase : Any = outputs.logits
_lowercase : str = torch.Size((1, 1_6) )
self.assertEqual(logits.shape , _lowerCAmelCase )
_lowercase : Union[str, Any] = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] , device=_lowerCAmelCase , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
images: Union[List[PIL.Image.Image], np.ndarray]
nsfw_content_detected: Optional[List[bool]]
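# Each pipeline below is gated on its optional dependencies; when a dependency is missing,
# dummy placeholder objects are imported instead, raising a helpful error at use time.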
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(">=", "0.0.12")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE_ ( lowercase_ ):
"""simple docstring"""
images: np.ndarray
nsfw_content_detected: List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
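# Maps fairseq parameter name fragments to their Wav2Vec2Conformer counterparts;
# "*" is a wildcard for the encoder layer index, substituted during conversion.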
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
"self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
"self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
"self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
"self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
"self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
"self_attn.rotary_emb": "encoder.embed_positions",
"self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
"conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
"conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
"conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
"conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
"conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
"ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
"ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
"ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
"ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
"ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
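# Copies a single tensor from the fairseq state dict into the HF model, walking the dotted
# attribute path in `key` and validating shapes before assignment.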
def __UpperCamelCase ( a, a, a, a, a) ->Dict:
for attribute in key.split("."):
lowerCamelCase__ = getattr(a, a)
if weight_type is not None:
lowerCamelCase__ = getattr(a, a).shape
else:
lowerCamelCase__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}")
if weight_type == "weight":
lowerCamelCase__ = value
elif weight_type == "weight_g":
lowerCamelCase__ = value
elif weight_type == "weight_v":
lowerCamelCase__ = value
elif weight_type == "bias":
lowerCamelCase__ = value
elif weight_type == "running_mean":
lowerCamelCase__ = value
elif weight_type == "running_var":
lowerCamelCase__ = value
elif weight_type == "num_batches_tracked":
lowerCamelCase__ = value
elif weight_type == "inv_freq":
lowerCamelCase__ = value
else:
lowerCamelCase__ = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def __UpperCamelCase ( a, a, a) ->Optional[int]:
lowerCamelCase__ = []
lowerCamelCase__ = fairseq_model.state_dict()
lowerCamelCase__ = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
lowerCamelCase__ = False
if "conv_layers" in name:
load_conv_layer(
a, a, a, a, hf_model.config.feat_extract_norm == "group", )
lowerCamelCase__ = True
else:
for key, mapped_key in MAPPING.items():
lowerCamelCase__ = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
lowerCamelCase__ = True
if "*" in mapped_key:
lowerCamelCase__ = name.split(a)[0].split(".")[-2]
lowerCamelCase__ = mapped_key.replace("*", a)
if "pos_bias_u" in name:
lowerCamelCase__ = None
elif "pos_bias_v" in name:
lowerCamelCase__ = None
elif "weight_g" in name:
lowerCamelCase__ = "weight_g"
elif "weight_v" in name:
lowerCamelCase__ = "weight_v"
elif "bias" in name:
lowerCamelCase__ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCamelCase__ = "weight"
elif "running_mean" in name:
lowerCamelCase__ = "running_mean"
elif "inv_freq" in name:
lowerCamelCase__ = "inv_freq"
elif "running_var" in name:
lowerCamelCase__ = "running_var"
elif "num_batches_tracked" in name:
lowerCamelCase__ = "num_batches_tracked"
else:
lowerCamelCase__ = None
set_recursively(a, a, a, a, a)
continue
if not is_used:
unused_weights.append(a)
logger.warning(f"Unused weights: {unused_weights}")
def __UpperCamelCase ( a, a, a, a, a) ->str:
lowerCamelCase__ = full_name.split("conv_layers.")[-1]
lowerCamelCase__ = name.split(".")
lowerCamelCase__ = int(items[0])
lowerCamelCase__ = int(items[1])
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.")
lowerCamelCase__ = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.")
lowerCamelCase__ = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.")
lowerCamelCase__ = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.")
lowerCamelCase__ = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
else:
unused_weights.append(a)
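# Entry point: builds the HF config (plus tokenizer/processor for fine-tuned checkpoints),
# loads the fairseq model, ports the weights, and saves the converted model.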
@torch.no_grad()
def __UpperCamelCase ( a, a, a=None, a=None, a=True) ->Optional[Any]:
if config_path is not None:
lowerCamelCase__ = WavaVecaConformerConfig.from_pretrained(a, hidden_act="swish")
else:
lowerCamelCase__ = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
lowerCamelCase__ = "rotary"
if is_finetuned:
if dict_path:
lowerCamelCase__ = Dictionary.load(a)
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowerCamelCase__ = target_dict.pad_index
lowerCamelCase__ = target_dict.bos_index
lowerCamelCase__ = target_dict.eos_index
lowerCamelCase__ = len(target_dict.symbols)
lowerCamelCase__ = os.path.join(a, "vocab.json")
if not os.path.isdir(a):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(a))
return
os.makedirs(a, exist_ok=a)
lowerCamelCase__ = target_dict.indices
# fairseq has the <pad> and <s> switched
lowerCamelCase__ = 0
lowerCamelCase__ = 1
with open(a, "w", encoding="utf-8") as vocab_handle:
json.dump(a, a)
lowerCamelCase__ = WavaVecaCTCTokenizer(
a, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=a, )
lowerCamelCase__ = True if config.feat_extract_norm == "layer" else False
lowerCamelCase__ = WavaVecaFeatureExtractor(
feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=a, return_attention_mask=a, )
lowerCamelCase__ = WavaVecaProcessor(feature_extractor=a, tokenizer=a)
processor.save_pretrained(a)
lowerCamelCase__ = WavaVecaConformerForCTC(a)
else:
lowerCamelCase__ = WavaVecaConformerForPreTraining(a)
if is_finetuned:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
else:
lowerCamelCase__ = argparse.Namespace(task="audio_pretraining")
lowerCamelCase__ = fairseq.tasks.setup_task(a)
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=a)
lowerCamelCase__ = model[0].eval()
recursively_load_weights(a, a, not is_finetuned)
hf_wavavec.save_pretrained(a)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
args = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
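# Example invocation (script name and paths are illustrative):
# python convert_wav2vec2_conformer.py \
#     --checkpoint_path /path/to/checkpoint_best.pt \
#     --dict_path /path/to/dict.ltr.txt \
#     --pytorch_dump_folder_path ./wav2vec2-conformer-ctc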
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A_ :
def __init__( self: Dict ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: int=13 ,__lowerCAmelCase: List[str]=30 ,__lowerCAmelCase: List[str]=2 ,__lowerCAmelCase: Dict=3 ,__lowerCAmelCase: Tuple=True ,__lowerCAmelCase: List[str]=True ,__lowerCAmelCase: Optional[Any]=32 ,__lowerCAmelCase: List[Any]=5 ,__lowerCAmelCase: int=4 ,__lowerCAmelCase: Optional[int]=37 ,__lowerCAmelCase: Dict="gelu" ,__lowerCAmelCase: str=0.1 ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: Optional[Any]=10 ,__lowerCAmelCase: List[str]=0.02 ,__lowerCAmelCase: Union[str, Any]=3 ,__lowerCAmelCase: Tuple=0.6 ,__lowerCAmelCase: Dict=None ,):
'''simple docstring'''
_lowerCamelCase : Optional[int] = parent
_lowerCamelCase : Any = batch_size
_lowerCamelCase : Any = image_size
_lowerCamelCase : List[str] = patch_size
_lowerCamelCase : Union[str, Any] = num_channels
_lowerCamelCase : List[str] = is_training
_lowerCamelCase : str = use_labels
_lowerCamelCase : List[Any] = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Optional[int] = num_attention_heads
_lowerCamelCase : Optional[Any] = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Union[str, Any] = hidden_dropout_prob
_lowerCamelCase : Any = attention_probs_dropout_prob
_lowerCamelCase : str = type_sequence_label_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Dict = mask_ratio
_lowerCamelCase : List[Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_lowerCamelCase : str = (image_size // patch_size) ** 2
_lowerCamelCase : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
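# e.g. with the defaults above: (30 // 2) ** 2 = 225 patches, so ceil(0.4 * 226) = 91 kept tokens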
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase : int = None
if self.use_labels:
_lowerCamelCase : Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
_lowerCamelCase : str = self.get_config()
return config, pixel_values, labels
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=__lowerCAmelCase ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,)
def _lowercase ( self: Any ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = ViTMAEModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self: List[str] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: int ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : List[Any] = ViTMAEForPreTraining(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Dict = model(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = (self.image_size // self.patch_size) ** 2
_lowerCamelCase : Optional[int] = self.patch_size**2 * self.num_channels
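# the MAE decoder predicts each patch as a flat vector of patch_size**2 * num_channels pixel values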
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_lowerCamelCase : str = 1
_lowerCamelCase : Tuple = ViTMAEForPreTraining(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase : Union[str, Any] = model(__lowerCAmelCase )
_lowerCamelCase : Any = self.patch_size**2
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A_ ( _a , _a , unittest.TestCase ):
lowerCAmelCase__ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
lowerCAmelCase__ = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : int = ViTMAEModelTester(self )
_lowerCamelCase : List[str] = ConfigTester(self ,config_class=__lowerCAmelCase ,has_text_modality=__lowerCAmelCase ,hidden_size=37 )
def _lowercase ( self: List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
pass
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Optional[Any] = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_lowerCamelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase ,nn.Linear ) )
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Dict = model_class(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCamelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowerCAmelCase )
def _lowercase ( self: Any ,__lowerCAmelCase: int ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
np.random.seed(2 )
_lowerCamelCase : Optional[Any] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
_lowerCamelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase : Union[str, Any] = torch.from_numpy(__lowerCAmelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_lowerCamelCase : Dict = pt_noise
super().check_pt_tf_models(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase, _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[str] = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_lowerCamelCase : int = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
out_a = outputs[0].cpu().numpy()
out_a[np.isnan(out_a)] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : str = model_class.from_pretrained(__lowerCAmelCase )
model.to(__lowerCAmelCase )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
_lowerCamelCase : Dict = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
# Make sure we don't have nans
out_b = after_outputs[0].cpu().numpy()
out_b[np.isnan(out_b)] = 0
max_diff = np.amax(np.abs(out_a - out_b))
self.assertLessEqual(max_diff, 1e-5)
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowercase ( self: str ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def _lowercase ( self: Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def _lowercase ( self: int ):
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowercase ( self: Dict ):
'''simple docstring'''
pass
@slow
def _lowercase ( self: Dict ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Optional[Any] = ViTMAEModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def lowerCamelCase_( ) -> str:
'''simple docstring'''
_lowerCamelCase : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
@cached_property
def _lowercase ( self: str ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def _lowercase ( self: int ):
'''simple docstring'''
np.random.seed(2 )
_lowerCamelCase : List[str] = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(__lowerCAmelCase )
_lowerCamelCase : int = self.default_image_processor
_lowerCamelCase : int = prepare_img()
_lowerCamelCase : Tuple = image_processor(images=__lowerCAmelCase ,return_tensors="pt" ).to(__lowerCAmelCase )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_lowerCamelCase : Tuple = ViTMAEConfig()
_lowerCamelCase : Union[str, Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_lowerCamelCase : Optional[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
_lowerCamelCase : Dict = model(**__lowerCAmelCase ,noise=torch.from_numpy(__lowerCAmelCase ).to(device=__lowerCAmelCase ) )
# verify the logits
_lowerCamelCase : Any = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape ,__lowerCAmelCase )
_lowerCamelCase : Tuple = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,expected_slice.to(__lowerCAmelCase ) ,atol=1e-4 ) )
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=True , snake_case__=99 , snake_case__=24 , snake_case__=2 , snake_case__=6 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=16 , snake_case__=2 , snake_case__=0.02 , snake_case__=3 , snake_case__=None , snake_case__=1_000 , ):
"""simple docstring"""
lowerCAmelCase : Dict = parent
lowerCAmelCase : str = batch_size
lowerCAmelCase : List[str] = seq_length
lowerCAmelCase : Any = is_training
lowerCAmelCase : Dict = use_input_mask
lowerCAmelCase : Any = use_token_type_ids
lowerCAmelCase : Any = use_labels
lowerCAmelCase : Dict = vocab_size
lowerCAmelCase : Dict = hidden_size
lowerCAmelCase : str = num_hidden_layers
lowerCAmelCase : Optional[Any] = num_attention_heads
lowerCAmelCase : Union[str, Any] = intermediate_size
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase : str = attention_probs_dropout_prob
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : Union[str, Any] = type_vocab_size
lowerCAmelCase : Tuple = type_sequence_label_size
lowerCAmelCase : str = initializer_range
lowerCAmelCase : str = num_labels
lowerCAmelCase : Optional[Any] = scope
lowerCAmelCase : int = range_bbox
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
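# (swap coordinates where needed so that x0 <= x1 and y0 <= y1 for every box)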
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCAmelCase : List[Any] = bbox[i, j, 3]
lowerCAmelCase : Optional[Any] = bbox[i, j, 1]
lowerCAmelCase : str = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCAmelCase : Optional[Any] = bbox[i, j, 2]
lowerCAmelCase : str = bbox[i, j, 0]
lowerCAmelCase : List[Any] = t
lowerCAmelCase : List[str] = None
if self.use_input_mask:
lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
lowerCAmelCase : List[Any] = None
if self.use_token_type_ids:
lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : Any = None
lowerCAmelCase : List[Any] = None
if self.use_labels:
lowerCAmelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : Optional[Any] = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def lowercase__ ( self ):
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = LiltModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : str = model(snake_case__ , bbox=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
lowerCAmelCase : List[Any] = model(snake_case__ , bbox=snake_case__ , token_type_ids=snake_case__ )
lowerCAmelCase : Dict = model(snake_case__ , bbox=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : List[str] = self.num_labels
lowerCAmelCase : Optional[Any] = LiltForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Optional[Any] = model(
snake_case__ , bbox=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : str = LiltForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : List[str] = model(
snake_case__ , bbox=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
bbox,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] =(
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
a : Any =(
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
a : List[Any] =False
a : Union[str, Any] =False
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
return True
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = LiltModelTester(self )
lowerCAmelCase : Optional[int] = ConfigTester(self , config_class=snake_case__ , hidden_size=37 )
def lowercase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase : int = type
self.model_tester.create_and_check_model(*snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : Dict = LiltModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_torch
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(snake_case__ )
lowerCAmelCase : str = torch.tensor([[1, 2]] , device=snake_case__ )
lowerCAmelCase : Tuple = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=snake_case__ )
# forward pass
with torch.no_grad():
lowerCAmelCase : Dict = model(input_ids=snake_case__ , bbox=snake_case__ )
lowerCAmelCase : str = torch.Size([1, 2, 768] )
lowerCAmelCase : List[str] = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=snake_case__ , )
self.assertTrue(outputs.last_hidden_state.shape , snake_case__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , snake_case__ , atol=1e-3 ) )
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
lowercase : Optional[int] =CanineTokenizer
lowercase : Union[str, Any] =False
def UpperCamelCase ( self ):
super().setUp()
lowercase_ :Union[str, Any] = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCamelCase ( self ):
return CanineTokenizer.from_pretrained('''google/canine-s''' )
def UpperCamelCase ( self , **UpperCamelCase_ ):
lowercase_ :int = self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
lowercase_ :int = 1024
return tokenizer
@require_torch
def UpperCamelCase ( self ):
lowercase_ :Optional[Any] = self.canine_tokenizer
lowercase_ :int = ['''Life is like a box of chocolates.''', '''You never know what you\'re gonna get.''']
# fmt: off
lowercase_ :str = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
# fmt: on
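# 57344/57345 are CANINE's private-use [CLS]/[SEP] codepoints; the remaining ids are the raw
# Unicode codepoints of the first sentence, right-padded with 0 to the longest sequence in the batch.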
lowercase_ :List[Any] = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors='''pt''' )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowercase_ :List[Any] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def UpperCamelCase ( self ):
lowercase_ :List[Any] = self.canine_tokenizer
lowercase_ :Dict = ['''Once there was a man.''', '''He wrote a test in HuggingFace Tranformers.''']
lowercase_ :Tuple = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors='''pt''' )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn('''input_ids''' , UpperCamelCase_ )
self.assertIn('''attention_mask''' , UpperCamelCase_ )
self.assertIn('''token_type_ids''' , UpperCamelCase_ )
@require_torch
def UpperCamelCase ( self ):
lowercase_ :Optional[Any] = self.canine_tokenizer
lowercase_ :str = [
'''What\'s the weater?''',
'''It\'s about 25 degrees.''',
]
lowercase_ :Tuple = tokenizer(
text_target=UpperCamelCase_ , max_length=32 , padding='''max_length''' , truncation=UpperCamelCase_ , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def UpperCamelCase ( self ):
# safety check on max_len default value so we are sure the test works
lowercase_ :str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowercase_ :int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase_ :int = tempfile.mkdtemp()
lowercase_ :str = ''' He is very happy, UNwant\u00E9d,running'''
lowercase_ :str = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
lowercase_ :Tuple = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
lowercase_ :List[Any] = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
shutil.rmtree(UpperCamelCase_ )
lowercase_ :Union[str, Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase_ :List[Any] = tempfile.mkdtemp()
lowercase_ :List[Any] = ''' He is very happy, UNwant\u00E9d,running'''
lowercase_ :List[str] = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
lowercase_ :Optional[int] = chr(0XE007 )
additional_special_tokens.append(UpperCamelCase_ )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
lowercase_ :List[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
lowercase_ :Dict = tokenizer.__class__.from_pretrained(UpperCamelCase_ )
lowercase_ :Optional[int] = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertIn(UpperCamelCase_ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowercase_ :Any = tokenizer.__class__.from_pretrained(UpperCamelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCamelCase_ )
def UpperCamelCase ( self ):
lowercase_ :int = self.get_tokenizers(do_lower_case=UpperCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
input_text , ids = self.get_clean_sequence(UpperCamelCase_ )
# a special token for Canine can be defined as follows:
lowercase_ :List[Any] = 0XE005
lowercase_ :Any = chr(UpperCamelCase_ )
tokenizer.add_special_tokens({'''cls_token''': special_token} )
lowercase_ :Tuple = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(len(UpperCamelCase_ ) , 1 )
lowercase_ :Tuple = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=UpperCamelCase_ )
lowercase_ :Tuple = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
lowercase_ :Any = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
lowercase_ :Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , input_encoded + special_token_id )
lowercase_ :int = tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
self.assertTrue(special_token not in decoded )
def UpperCamelCase ( self ):
lowercase_ :str = self.get_tokenizers(do_lower_case=UpperCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowercase_ :Optional[int] = chr(0XE005 )
lowercase_ :Tuple = chr(0XE006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=UpperCamelCase_ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({'''additional_special_tokens''': [SPECIAL_TOKEN_2]} )
lowercase_ :Any = tokenizer.tokenize(UpperCamelCase_ )
lowercase_ :str = tokenizer.tokenize(UpperCamelCase_ )
self.assertEqual(len(UpperCamelCase_ ) , 1 )
self.assertEqual(len(UpperCamelCase_ ) , 1 )
self.assertEqual(token_a[0] , UpperCamelCase_ )
self.assertEqual(token_a[0] , UpperCamelCase_ )
@require_tokenizers
def UpperCamelCase ( self ):
lowercase_ :Optional[int] = self.get_tokenizers(do_lower_case=UpperCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# a special token for Canine can be defined as follows:
lowercase_ :int = 0XE006
lowercase_ :Optional[Any] = chr(UpperCamelCase_ )
lowercase_ :Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(UpperCamelCase_ )
tokenizer.from_pretrained(UpperCamelCase_ )
def UpperCamelCase ( self ):
lowercase_ :Dict = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
lowercase_ :Dict = json.load(UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
lowercase_ :int = json.load(UpperCamelCase_ )
# a special token for Canine can be defined as follows:
lowercase_ :List[Any] = 0XE006
lowercase_ :List[str] = chr(UpperCamelCase_ )
lowercase_ :List[Any] = [new_token_a]
lowercase_ :List[str] = [new_token_a]
with open(os.path.join(UpperCamelCase_ , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
with open(os.path.join(UpperCamelCase_ , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowercase_ :Optional[int] = tokenizer_class.from_pretrained(UpperCamelCase_ , extra_ids=0 )
self.assertIn(UpperCamelCase_ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
lowercase_ :str = 0XE007
lowercase_ :Any = chr(UpperCamelCase_ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowercase_ :Optional[Any] = [AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ )]
lowercase_ :Union[str, Any] = tokenizer_class.from_pretrained(
UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , extra_ids=0 )
self.assertIn(UpperCamelCase_ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def UpperCamelCase ( self ):
lowercase_ :Dict = self.get_tokenizers(do_lower_case=UpperCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowercase_ :str = '''hello world'''
if self.space_between_special_tokens:
lowercase_ :Tuple = '''[CLS] hello world [SEP]'''
else:
lowercase_ :Optional[int] = input
lowercase_ :Tuple = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
lowercase_ :int = tokenizer.decode(UpperCamelCase_ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(UpperCamelCase_ , [output, output.lower()] )
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]
                token_to_test_setters = "a"
                token_id_to_test_setters = ord(token_to_test_setters)
                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)
                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)
                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])
                additional_special_token_id = 0xE006
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, "additional_special_tokens_ids", [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [additional_special_token])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [additional_special_token_id])
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
pass
| 441
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
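# Note on the pattern above (descriptive sketch, not part of the original module):
# outside of TYPE_CHECKING the module object is swapped for a lightweight
# _LazyModule proxy, so `from transformers.models.tapas import TapasModel` only
# triggers the heavy torch/TF import the first time that attribute is accessed.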
| 441
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, word_embed_proj_dim=16):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        config = self.config_cls(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=False, **self.config_updates)
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)
                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)
                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)
                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(vocab_size=self.vocab_size, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0)
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant([[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]])
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))
        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)
        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"
        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])
        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)
        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(tf.cast(inputs["attention_mask"][-1], tf.int64))
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(batch_out_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 98
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def __a ( self ) -> Any:
'''simple docstring'''
pass
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)
                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)
                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)
                self.assertEqual(text_2.replace(" ", ""), output_text)
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def __a ( self ) -> str:
'''simple docstring'''
pass
| 326
| 0
|
import math
def decimal_to_octal(num: int) -> str:
    """Convert a positive decimal integer to its octal string representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"
def main() -> None:
print("""\n2 in octal is:""" )
print(decimal_to_octal(2 ) ) # = 2
print("""\n8 in octal is:""" )
print(decimal_to_octal(8 ) ) # = 10
print("""\n65 in octal is:""" )
print(decimal_to_octal(65 ) ) # = 101
print("""\n216 in octal is:""" )
print(decimal_to_octal(216 ) ) # = 330
print("""\n512 in octal is:""" )
print(decimal_to_octal(512 ) ) # = 1000
print("""\n""" )
if __name__ == "__main__":
main()
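# A minimal cross-check (added sketch; the helper below is not part of the original
# module): Python's built-in oct() should agree with decimal_to_octal() for positive
# integers, since both produce a "0o"-prefixed string.
def check_against_builtin(limit: int = 100) -> None:
    for num in range(1, limit):
        assert decimal_to_octal(num) == oct(num), (num, decimal_to_octal(num), oct(num))


if __name__ == "__main__":
    check_against_builtin()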
| 707
|
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Rotate/shear the image with the affine transform that maps pt1 onto pt2."""
    rotation_matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, rotation_matrix, (rows, cols))
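# How this works (descriptive note, not part of the original script):
# cv2.getAffineTransform solves for the 2x3 matrix M that maps the three source
# points pt1 onto the three destination points pt2, and cv2.warpAffine applies
# [x', y'] = M @ [x, y, 1] to every pixel. Three point pairs are exactly enough
# to determine the six entries of M.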
if __name__ == "__main__":
    # read original image
    image = cv2.imread(str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg"))
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
plt.title(titles[i])
plt.axis('off')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 64
| 0
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
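# e.g. to_2tuple(224) -> (224, 224), while to_2tuple((224, 196)) is returned
# unchanged; this mirrors how vision models normalise scalar image_size and
# patch_size arguments into (height, width) pairs.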
@require_flax
class lowercase_ :
"""simple docstring"""
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
pass
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , **_UpperCAmelCase ):
"""simple docstring"""
a_ = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ )
a_ = FlaxVisionTextDualEncoderModel(lowercase_ )
a_ = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , **_UpperCAmelCase ):
"""simple docstring"""
a_ = self.get_vision_text_model(lowercase_ , lowercase_ )
a_ = {'''vision_model''': vision_model, '''text_model''': text_model}
a_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ )
a_ = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , **_UpperCAmelCase ):
"""simple docstring"""
a_ = self.get_vision_text_model(lowercase_ , lowercase_ )
a_ = {'''vision_model''': vision_model, '''text_model''': text_model}
a_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ )
a_ = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ )
a_ = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase_ )
a_ = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ )
a_ = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ )
a_ = after_output[0]
a_ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowercase_ , 1e-3 )
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , **_UpperCAmelCase ):
"""simple docstring"""
a_ = self.get_vision_text_model(lowercase_ , lowercase_ )
a_ = {'''vision_model''': vision_model, '''text_model''': text_model}
a_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ )
a_ = model(
input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , output_attentions=lowercase_ )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1])
        )
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
pt_model.to(lowercase_ )
pt_model.eval()
# prepare inputs
a_ = inputs_dict
a_ = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
a_ = pt_model(**lowercase_ ).to_tuple()
a_ = fx_model(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowercase_ , pt_output.numpy() , 4e-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowercase_ )
a_ = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ , from_pt=lowercase_ )
a_ = fx_model_loaded(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowercase_ , pt_output.numpy() , 4e-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowercase_ )
a_ = VisionTextDualEncoderModel.from_pretrained(lowercase_ , from_flax=lowercase_ )
pt_model_loaded.to(lowercase_ )
pt_model_loaded.eval()
with torch.no_grad():
a_ = pt_model_loaded(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowercase_ , pt_output_loaded.numpy() , 4e-2 )
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
a_ = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ )
a_ = VisionTextDualEncoderModel(lowercase_ )
a_ = FlaxVisionTextDualEncoderModel(lowercase_ )
a_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowercase_ )
a_ = fx_state
self.check_pt_flax_equivalence(lowercase_ , lowercase_ , lowercase_ )
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
a_ = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ )
a_ = VisionTextDualEncoderModel(lowercase_ )
a_ = FlaxVisionTextDualEncoderModel(lowercase_ )
a_ = load_flax_weights_in_pytorch_model(lowercase_ , fx_model.params )
self.check_pt_flax_equivalence(lowercase_ , lowercase_ , lowercase_ )
def lowercase__ ( self ):
"""simple docstring"""
a_ = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowercase_ )
def lowercase__ ( self ):
"""simple docstring"""
a_ = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowercase_ )
def lowercase__ ( self ):
"""simple docstring"""
a_ = self.prepare_config_and_inputs()
self.check_save_load(**lowercase_ )
def lowercase__ ( self ):
"""simple docstring"""
a_ = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowercase_ )
@is_pt_flax_cross_test
def lowercase__ ( self ):
"""simple docstring"""
a_ = self.prepare_config_and_inputs()
a_ = config_inputs_dict.pop("""vision_config""" )
a_ = config_inputs_dict.pop("""text_config""" )
a_ = config_inputs_dict
self.check_equivalence_pt_to_flax(lowercase_ , lowercase_ , lowercase_ )
self.check_equivalence_flax_to_pt(lowercase_ , lowercase_ , lowercase_ )
@slow
def lowercase__ ( self ):
"""simple docstring"""
a_ = self.get_pretrained_model_and_inputs()
a_ = model_a(**lowercase_ )
a_ = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowercase_ )
a_ = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase_ )
a_ = model_a(**lowercase_ )
a_ = after_outputs[0]
a_ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowercase_ , 1e-5 )
@require_flax
class lowercase_ ( UpperCAmelCase__ ,unittest.TestCase):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
a_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowercase_ , text_from_pt=lowercase_ , )
a_ = 13
a_ = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
a_ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
a_ = random_attention_mask([batch_size, 4] )
a_ = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
a_ = FlaxViTModel(lowercase_ )
a_ = FlaxBertModel(lowercase_ )
return vision_model, text_model
def lowercase__ ( self ):
"""simple docstring"""
a_ = FlaxViTModelTester(self )
a_ = FlaxBertModelTester(self )
a_ = vit_model_tester.prepare_config_and_inputs()
a_ = bert_model_tester.prepare_config_and_inputs()
a_ = vision_config_and_inputs
a_ = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class lowercase_ ( UpperCAmelCase__ ,unittest.TestCase):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
a_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowercase_ , text_from_pt=lowercase_ , )
a_ = 13
a_ = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
a_ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
a_ = random_attention_mask([batch_size, 4] )
a_ = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
a_ = FlaxCLIPVisionModel(lowercase_ )
a_ = FlaxBertModel(lowercase_ )
return vision_model, text_model
def lowercase__ ( self ):
"""simple docstring"""
a_ = FlaxCLIPVisionModelTester(self )
a_ = FlaxBertModelTester(self )
a_ = clip_model_tester.prepare_config_and_inputs()
a_ = bert_model_tester.prepare_config_and_inputs()
a_ = vision_config_and_inputs
a_ = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class lowercase_ ( unittest.TestCase):
"""simple docstring"""
@slow
def lowercase__ ( self ):
"""simple docstring"""
a_ = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 )
a_ = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
a_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
a_ = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=lowercase_ , padding=lowercase_ , return_tensors="""np""" )
a_ = model(**lowercase_ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
a_ = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
self.assertTrue(np.allclose(outputs.logits_per_image , lowercase_ , atol=1e-3 ) )
| 483
|
"""simple docstring"""
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(input_ids, max_seq_length=max_length, pad_value=self.pad_token_id)
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 512
| 0
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE ={
"""SenseTime/deformable-detr""": """https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json""",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, max_position_embeddings=1024, encoder_layers=6, encoder_ffn_dim=1024, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, num_feature_levels=4, encoder_n_points=4, decoder_n_points=4, two_stage=False, two_stage_num_proposals=300, with_box_refine=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, disable_custom_kernels=False, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 89
|
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))
        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 89
| 1
|
import torch
from diffusers import DiffusionPipeline
class CustomPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
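# A hedged usage sketch (the component choices are illustrative, not mandated by
# the pipeline above): any UNet2DModel/scheduler pair with matching shapes works.
#
#     from diffusers import DDPMScheduler, UNet2DModel
#     unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
#     scheduler = DDPMScheduler()
#     pipeline = CustomPipeline(unet=unet, scheduler=scheduler)
#     result = pipeline()  # an all-ones tensor shaped like one denoising step's output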
| 84
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")
        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch
                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")
            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding["query_pixel_values"] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning,
        )
        return self.image_processor
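# A hedged usage sketch (the checkpoint name is a real public one, but network
# access is assumed; `image` stands in for any PIL image):
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(text=[["a photo of a cat"]], images=image, return_tensors="np")
#     # inputs holds "input_ids", "attention_mask" and "pixel_values", with text
#     # queries padded to the longest query list in the batch.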
| 84
| 1
|
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks if a matrix is Hermitian (equal to its own conjugate transpose)."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient of a Hermitian matrix a and vector v."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
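# A further sanity check (added sketch; the helper name is ours): for a Hermitian
# matrix the Rayleigh quotient is real and bounded by the extreme eigenvalues,
# which np.linalg.eigvalsh returns in ascending order.
def eigenvalue_bound_check() -> None:
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    v = np.array([[1], [2], [3]])
    low, high = np.linalg.eigvalsh(a)[[0, -1]]
    quotient = rayleigh_quotient(a, v).item()
    assert low - 1e-9 <= quotient <= high + 1e-9


if __name__ == "__main__":
    eigenvalue_bound_check()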
| 712
|
def solution(length: int = 50) -> int:
    """
    Counts the ways a row of the given length can be filled with unit black squares
    and coloured oblong tiles of length 2, 3 or 4 (Project Euler problem 117).
    ways_number[n] starts at 1 (the all-black row) and accumulates, for every
    position and length of the first coloured tile, the ways to fill the remainder.
    """
    ways_number = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(f'{solution() = }')
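# A brute-force cross-check (added sketch, not part of the Project Euler solution):
# every tiling is a composition of the row length into parts of size 1 (black
# square) and 2-4 (coloured tiles), so a direct recursion must agree with the DP.
def brute_force(length: int) -> int:
    if length == 0:
        return 1
    return sum(brute_force(length - part) for part in (1, 2, 3, 4) if part <= length)


if __name__ == "__main__":
    assert all(solution(n) == brute_force(n) for n in range(11))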
| 86
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case : List[Any] = logging.get_logger(__name__)
snake_case : Optional[int] = {
"""shi-labs/dinat-mini-in1k-224""": """https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json""",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]], mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-5, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
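        # Worked example (values taken from the defaults above): with embed_dim=64
        # and four stages (len(depths) == 4), hidden_size = int(64 * 2 ** 3) = 512,
        # i.e. the channel dimension doubles at each of the three stage transitions.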
| 545
|
"""simple docstring"""
import os
import jsonlines
import numpy as np
from tqdm import tqdm
snake_case : Tuple = 2_0_4_8
snake_case : str = 4_0_9_6
snake_case : int = 4_2
snake_case : List[Any] = os.environ.pop("""PROCESS_TRAIN""", """false""")
snake_case : List[str] = {"""null""": 0, """short""": 1, """long""": 2, """yes""": 3, """no""": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)
    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False
    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])
    return answer
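# Shape sketch (illustrative values): for a sample with one short answer the
# returned dict looks like {"id": ..., "category": ["short"], "start_token": [t0],
# "end_token": [t1], "start_byte": [...], "end_byte": [...], "text": [...],
# "remove_it": False}; yes/no samples carry empty token/byte lists instead.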
def get_context_and_ans(example, assertion=False):
    """Gives the new context (with <html> tokens removed) and the answer span shifted accordingly."""
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
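# Worked example (added, not from the original script): with document tokens
# ["<P>", "the", "answer", "</P>"] and a gold span over "answer" only
# (start_token=2, end_token=3 in raw token space), the loop above drops the two
# HTML tokens and shifts the indices, giving context ["the", "answer"] with
# start_token=1, end_token=2, so context[start_token:end_token] == ["answer"].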
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    """Splits an over-long context into overlapping windows and remaps the answer span into each window."""
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
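# Stride arithmetic sketch (added): window starts advance by
# max_length - doc_stride tokens, so with the defaults (4096, 2048) consecutive
# document slices overlap by doc_stride - q_len tokens, and the question prefix
# q_indices is re-attached to every window:
#   list(range(q_len, len(input_ids), 4096 - 2048))  # q_len=16 -> [16, 2064, 4112, ...]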
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop roughly 60% of the "null" samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
snake_case : Any = load_dataset("""natural_questions""")
snake_case : List[Any] = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""")
snake_case : Tuple = data["""train""" if PROCESS_TRAIN == """true""" else """validation"""]
snake_case : List[Any] = {
"""tokenizer""": tokenizer,
"""doc_stride""": DOC_STRIDE,
"""max_length""": MAX_LENGTH,
"""assertion""": False,
}
snake_case : List[str] = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
snake_case : int = data.remove_columns(["""annotations""", """document""", """id""", """question"""])
print(data)
np.random.seed(SEED)
snake_case : Union[str, Any] = """nq-training.jsonl""" if PROCESS_TRAIN == """true""" else """nq-validation.jsonl"""
save_to_disk(data, file_name=cache_file_name)
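    # Reading the cache back (illustrative sketch, not part of the original script):
    #   import jsonlines
    #   with jsonlines.open("nq-validation.jsonl") as reader:
    #       sample = next(iter(reader))
    #   # sample has keys: input_ids, start_token, end_token, category (int label)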
| 545
| 1
|
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")

        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")

        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")

        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 718
|
'''simple docstring'''
import collections
import gzip
import os
import urllib.request
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
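# Worked example for the flat-index trick above (added): with labels_dense=[1, 0]
# and num_classes=3, index_offset is [0, 3], so the flat positions set to 1 are
# [0 + 1, 3 + 0] = [1, 3], producing
#   [[0., 1., 0.],
#    [1., 0., 0.]]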
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
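    # Epoch-boundary sketch (added): with _num_examples=5 and batch_size=3, the
    # second next_batch call takes the 2 leftover examples, increments
    # _epochs_completed, reshuffles, and concatenates 1 example from the new
    # epoch, so every returned batch still has exactly batch_size rows.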
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from `source_url`, unless it is already present locally."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
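# Minimal usage sketch (added; the module is deprecated upstream, as the
# @deprecated notices above say):
#   datasets = read_data_sets("/tmp/mnist_data", one_hot=True)
#   images, labels = datasets.train.next_batch(32)   # images: (32, 784) float32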
| 593
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
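        # Masking arithmetic sketch (added): with the defaults above an image is
        # split into (image_size // patch_size) ** 2 = (224 // 16) ** 2 = 196
        # patches, and mask_ratio=0.75 hides int(196 * 0.75) = 147 of them
        # during pre-training.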
| 160
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"],
"feature_extraction_whisper": ["WhisperFeatureExtractor"],
"processing_whisper": ["WhisperProcessor"],
"tokenization_whisper": ["WhisperTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Union[str, Any] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
"WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"WhisperForConditionalGeneration",
"WhisperModel",
"WhisperPreTrainedModel",
"WhisperForAudioClassification",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
"TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWhisperForConditionalGeneration",
"TFWhisperModel",
"TFWhisperPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
"FlaxWhisperForConditionalGeneration",
"FlaxWhisperModel",
"FlaxWhisperPreTrainedModel",
"FlaxWhisperForAudioClassification",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
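# Illustrative note (added): with the _LazyModule registration above, a statement
# such as `from transformers.models.whisper import WhisperForConditionalGeneration`
# only imports modeling_whisper at attribute-access time, and the try/except
# blocks simply drop the torch/TF/flax entries when those backends are missing.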
| 160
| 1
|
'''simple docstring'''
def solution(power: int = 1000) -> int:
    """Return the sum of the digits of 2**power (Project Euler problem 16)."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
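    # Quick check (added): 2**15 == 32768 and 3 + 2 + 7 + 6 + 8 == 26, so
    # solution(15) should return 26; solution() answers Project Euler 16 for 2**1000.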
| 695
|
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 695
| 1
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )
    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_headmasking = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 37
|
'''simple docstring'''
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """Deterministic Miller-Rabin primality test, valid up to the 13th known bound."""
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]

    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n -1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False

    return True
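# Decomposition example (added): for n = 561, n - 1 = 560 = 35 * 2**4, so the
# loop above yields d = 35 and s = 4; the witness loop then evaluates
# pow(prime, 35 * 2**r, 561) for r in range(4) for every prime in plist.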
def test_miller_rabin() -> None:
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(83_8201 )
assert miller_rabin(83_8207 )
# 1_373_653
assert not miller_rabin(1731_6001 )
assert miller_rabin(1731_6017 )
# 25_326_001
assert not miller_rabin(30_7838_6641 )
assert miller_rabin(30_7838_6653 )
# 3_215_031_751
assert not miller_rabin(1_7130_4557_4801 )
assert miller_rabin(1_7130_4557_4819 )
# 2_152_302_898_747
assert not miller_rabin(2_7797_9972_8307 )
assert miller_rabin(2_7797_9972_8327 )
# 3_474_749_660_383
assert not miller_rabin(113_8500_2390_9441 )
assert miller_rabin(113_8500_2390_9527 )
# 341_550_071_728_321
assert not miller_rabin(127_5041_0188_4880_4351 )
assert miller_rabin(127_5041_0188_4880_4391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(796_6646_4458_5077_8779_1867 )
assert miller_rabin(796_6646_4458_5077_8779_1951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5528_4067_7446_6478_9766_0333 )
assert miller_rabin(5528_4067_7446_6478_9766_0359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 294
| 0
|
"""simple docstring"""
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    """Schur complement of the block matrix [[A, B], [B.T, C]]: C - B.T @ A^-1 @ B."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b
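# Identity exercised by the test below (added note): for the block matrix
# M = [[A, B], [B.T, C]] with invertible A,
#   det(M) = det(A) * det(C - B.T @ A^-1 @ B)
# which is exactly what test_schur_complement asserts numerically.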
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        # A has 2 rows while B has 3, so the row check must raise
        a = np.array([[1, 2, 1], [2, 1, 2]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 213
|
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    return (abs(source - target) / target) < 0.01


@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    datasets_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(datasets_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
| 213
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
| 198
|
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
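    # Example invocation (added; the script file name below is illustrative):
    #   python convert_unclip_txt2img_to_image_variation.py \
    #       --txt2img_unclip kakaobrain/karlo-v1-alpha \
    #       --dump_path ./karlo-image-variation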
| 425
| 0
|
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    """Creates a set of `DataLoader`s for the `glue` dataset."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    return eval_metric["accuracy"]
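# Note on the last-batch truncation above (added): with e.g. 1043 eval samples
# across 8 processes, the sampler pads the final batch so every rank contributes
# equally to accelerator.gather(); slicing to len(eval_dataloader.dataset) -
# samples_seen drops those duplicated tail rows before they reach the metric.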
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCAmelCase : str = config['lr']
_lowerCAmelCase : int = int(config['num_epochs'] )
_lowerCAmelCase : Tuple = int(config['seed'] )
_lowerCAmelCase : int = int(config['batch_size'] )
_lowerCAmelCase : int = args.model_name_or_path
set_seed(_A )
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = get_dataloaders(_A , _A , _A )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowerCAmelCase : int = AutoModelForSequenceClassification.from_pretrained(_A , return_dict=_A )
# Instantiate optimizer
_lowerCAmelCase : List[str] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_lowerCAmelCase : Optional[Any] = optimizer_cls(params=model.parameters() , lr=_A )
if accelerator.state.deepspeed_plugin is not None:
_lowerCAmelCase : str = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
_lowerCAmelCase : str = 1
_lowerCAmelCase : List[Any] = (len(_A ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_lowerCAmelCase : str = get_linear_schedule_with_warmup(
optimizer=_A , num_warmup_steps=0 , num_training_steps=_A , )
else:
_lowerCAmelCase : Any = DummyScheduler(_A , total_num_steps=_A , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = accelerator.prepare(
_A , _A , _A , _A , _A )
# We need to keep track of how many total steps we have iterated over
_lowerCAmelCase : List[Any] = 0
    # We also need to keep track of the starting epoch so files are named properly
_lowerCAmelCase : Optional[Any] = 0
_lowerCAmelCase : Optional[Any] = evaluate.load('glue' , 'mrpc' )
_lowerCAmelCase : List[Any] = num_epochs
if args.partial_train_epoch is not None:
_lowerCAmelCase : Any = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
_lowerCAmelCase : str = args.resume_from_checkpoint.split('epoch_' )[1]
_lowerCAmelCase : str = ''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
_lowerCAmelCase : Any = int(_A ) + 1
_lowerCAmelCase : str = evaluation_loop(_A , _A , _A , _A )
accelerator.print('resumed checkpoint performance:' , _A )
accelerator.print('resumed checkpoint\'s scheduler\'s lr:' , lr_scheduler.get_lr()[0] )
        accelerator.print('resumed optimizer\'s lr:' , optimizer.param_groups[0]['lr'] )
with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , 'r' ) as f:
_lowerCAmelCase : Tuple = json.load(_A )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
_lowerCAmelCase : Tuple = {}
for epoch in range(_A , _A ):
model.train()
for step, batch in enumerate(_A ):
_lowerCAmelCase : Any = model(**_A )
_lowerCAmelCase : str = outputs.loss
_lowerCAmelCase : Any = loss / gradient_accumulation_steps
accelerator.backward(_A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
_lowerCAmelCase : Dict = f'epoch_{epoch}'
_lowerCAmelCase : List[Any] = os.path.join(args.output_dir , _A )
accelerator.save_state(_A )
_lowerCAmelCase : List[str] = evaluation_loop(_A , _A , _A , _A )
_lowerCAmelCase : str = accuracy
_lowerCAmelCase : Union[str, Any] = lr_scheduler.get_lr()[0]
_lowerCAmelCase : Optional[int] = optimizer.param_groups[0]['lr']
_lowerCAmelCase : Dict = epoch
_lowerCAmelCase : Optional[int] = overall_step
accelerator.print(f'epoch {epoch}:' , _A )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , 'w' ) as f:
json.dump(_A , _A )
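# Illustrative note (ours): each `state_{epoch}.json` written above holds exactly the values
# that the resume path asserts against, e.g. (numbers hypothetical):
#   {"accuracy": 0.84, "lr": 1.6e-05, "optimizer_lr": 1.6e-05, "epoch": 0, "overall_step": 230}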
def lowercase ():
"""simple docstring"""
    _lowerCAmelCase : List[str] = argparse.ArgumentParser(description='Simple example of a training script with checkpointing and resumption.' )
parser.add_argument(
'--model_name_or_path' , type=_A , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_A , )
parser.add_argument(
'--output_dir' , type=_A , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=_A , default=_A , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--partial_train_epoch' , type=_A , default=_A , help='If passed, the training will stop after this number of epochs.' , )
parser.add_argument(
'--num_epochs' , type=_A , default=2 , help='Number of train epochs.' , )
_lowerCAmelCase : int = parser.parse_args()
_lowerCAmelCase : List[Any] = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 4_2, 'batch_size': 1_6}
training_function(_A , _A )
if __name__ == "__main__":
main()
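# Example invocation (script name and paths are illustrative; the flags match the argparse
# definitions above):
#   python checkpointing_script.py --model_name_or_path bert-base-cased --num_epochs 2 \
#       --output_dir ./ckpts --resume_from_checkpoint ./ckpts/epoch_0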
| 630
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase : Any = {
"""configuration_x_clip""": [
"""XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XCLIPConfig""",
"""XCLIPTextConfig""",
"""XCLIPVisionConfig""",
],
"""processing_x_clip""": ["""XCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] = [
"""XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XCLIPModel""",
"""XCLIPPreTrainedModel""",
"""XCLIPTextModel""",
"""XCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
lowerCAmelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
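# Illustrative note (ours): with the lazy module in place, importing this package is cheap;
# e.g. `from transformers.models.x_clip import XCLIPModel` only triggers the heavy
# `modeling_x_clip` import when the attribute is first accessed, assuming the standard
# transformers `_LazyModule` behavior.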
| 630
| 1
|
'''simple docstring'''
def _UpperCamelCase ( __UpperCamelCase ) -> bool:
if num < 0:
return False
lowerCamelCase_ = num
lowerCamelCase_ = 0
while num > 0:
lowerCamelCase_ = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
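# Illustrative behavior (ours): the helper reverses the digits and compares with the
# original value, e.g. 121 -> True, 10 -> False, and negatives such as -121 -> False
# (they are rejected before the digit loop).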
| 42
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_lowerCamelCase : int = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Dict = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
_lowerCamelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 686
| 0
|
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class lowerCamelCase (A__ ):
lowerCamelCase__ : List[str] = 'EncodecFeatureExtractor'
lowerCamelCase__ : Dict = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Tuple ) -> Optional[int]:
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = self.feature_extractor
SCREAMING_SNAKE_CASE__ = False
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __UpperCAmelCase : str=None , __UpperCAmelCase : int=None , __UpperCAmelCase : List[str]=True ) -> Union[str, Any]:
return self.tokenizer.get_decoder_prompt_ids(task=__UpperCAmelCase , language=__UpperCAmelCase , no_timestamps=__UpperCAmelCase )
def __call__( self : Optional[Any] , *__UpperCAmelCase : Optional[Any] , **__UpperCAmelCase : int ) -> Optional[Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__UpperCAmelCase , **__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = kwargs.pop("""audio""" , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = kwargs.pop("""sampling_rate""" , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = kwargs.pop("""text""" , __UpperCAmelCase )
if len(__UpperCAmelCase ) > 0:
SCREAMING_SNAKE_CASE__ = args[0]
SCREAMING_SNAKE_CASE__ = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if text is not None:
SCREAMING_SNAKE_CASE__ = self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase )
if audio is not None:
SCREAMING_SNAKE_CASE__ = self.feature_extractor(__UpperCAmelCase , *__UpperCAmelCase , sampling_rate=__UpperCAmelCase , **__UpperCAmelCase )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
SCREAMING_SNAKE_CASE__ = audio_inputs["""input_values"""]
if "padding_mask" in audio_inputs:
SCREAMING_SNAKE_CASE__ = audio_inputs["""padding_mask"""]
return inputs
def SCREAMING_SNAKE_CASE ( self : int , *__UpperCAmelCase : int , **__UpperCAmelCase : Dict ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = kwargs.pop("""audio""" , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = kwargs.pop("""padding_mask""" , __UpperCAmelCase )
if len(__UpperCAmelCase ) > 0:
SCREAMING_SNAKE_CASE__ = args[0]
SCREAMING_SNAKE_CASE__ = args[1:]
if audio_values is not None:
return self._decode_audio(__UpperCAmelCase , padding_mask=__UpperCAmelCase )
else:
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str , *__UpperCAmelCase : Tuple , **__UpperCAmelCase : int ) -> str:
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
    def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[np.ndarray] = None ) -> List[np.ndarray]:
SCREAMING_SNAKE_CASE__ = to_numpy(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = audio_values.shape
if padding_mask is None:
return list(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = to_numpy(__UpperCAmelCase )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
SCREAMING_SNAKE_CASE__ = seq_len - padding_mask.shape[-1]
SCREAMING_SNAKE_CASE__ = 1 - self.feature_extractor.padding_value
SCREAMING_SNAKE_CASE__ = np.pad(__UpperCAmelCase , ((0, 0), (0, difference)) , """constant""" , constant_values=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = audio_values.tolist()
for i in range(__UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
SCREAMING_SNAKE_CASE__ = sliced_audio.reshape(__UpperCAmelCase , -1 )
return audio_values
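# Illustrative usage (ours): pairing an EncodecFeatureExtractor with a T5 tokenizer matches
# the MusicGen processor; the class and checkpoint names below are assumptions:
#   processor = MusicgenProcessor.from_pretrained("facebook/musicgen-small")
#   inputs = processor(text=["80s pop track"], padding=True, return_tensors="pt")
#   audios = processor.batch_decode(audio=generated_values, padding_mask=padding_mask)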
| 616
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : List[Any] = logging.get_logger(__name__)
A_ : Dict = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class lowerCamelCase (A__ ):
lowerCamelCase__ : int = 'unispeech'
def __init__( self : Union[str, Any] , __UpperCAmelCase : List[Any]=3_2 , __UpperCAmelCase : Union[str, Any]=7_6_8 , __UpperCAmelCase : Tuple=1_2 , __UpperCAmelCase : Dict=1_2 , __UpperCAmelCase : Optional[Any]=3_0_7_2 , __UpperCAmelCase : Optional[Any]="gelu" , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : List[Any]=0.1 , __UpperCAmelCase : List[str]=0.1 , __UpperCAmelCase : List[str]=0.0 , __UpperCAmelCase : Tuple=0.0 , __UpperCAmelCase : List[Any]=0.1 , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : Union[str, Any]=0.02 , __UpperCAmelCase : Union[str, Any]=1e-5 , __UpperCAmelCase : List[Any]="group" , __UpperCAmelCase : Union[str, Any]="gelu" , __UpperCAmelCase : Any=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __UpperCAmelCase : List[str]=(5, 2, 2, 2, 2, 2, 2) , __UpperCAmelCase : int=(1_0, 3, 3, 3, 3, 2, 2) , __UpperCAmelCase : str=False , __UpperCAmelCase : Any=1_2_8 , __UpperCAmelCase : str=1_6 , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : Union[str, Any]=0.05 , __UpperCAmelCase : str=1_0 , __UpperCAmelCase : Optional[Any]=2 , __UpperCAmelCase : List[Any]=0.0 , __UpperCAmelCase : Tuple=1_0 , __UpperCAmelCase : Tuple=0 , __UpperCAmelCase : Tuple=3_2_0 , __UpperCAmelCase : Any=2 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : Any=1_0_0 , __UpperCAmelCase : str=2_5_6 , __UpperCAmelCase : Dict=2_5_6 , __UpperCAmelCase : Dict=0.1 , __UpperCAmelCase : List[str]="mean" , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : List[Any]=False , __UpperCAmelCase : str=2_5_6 , __UpperCAmelCase : Dict=8_0 , __UpperCAmelCase : List[Any]=0 , __UpperCAmelCase : int=1 , __UpperCAmelCase : Optional[int]=2 , __UpperCAmelCase : Any=0.5 , **__UpperCAmelCase : List[str] , ) -> Tuple:
super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = feat_extract_norm
SCREAMING_SNAKE_CASE__ = feat_extract_activation
SCREAMING_SNAKE_CASE__ = list(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = list(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = list(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = conv_bias
SCREAMING_SNAKE_CASE__ = num_conv_pos_embeddings
SCREAMING_SNAKE_CASE__ = num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE__ = len(self.conv_dim )
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_dropout
SCREAMING_SNAKE_CASE__ = attention_dropout
SCREAMING_SNAKE_CASE__ = activation_dropout
SCREAMING_SNAKE_CASE__ = feat_proj_dropout
SCREAMING_SNAKE_CASE__ = final_dropout
SCREAMING_SNAKE_CASE__ = layerdrop
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = num_ctc_classes
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = do_stable_layer_norm
SCREAMING_SNAKE_CASE__ = use_weighted_layer_sum
SCREAMING_SNAKE_CASE__ = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE__ = apply_spec_augment
SCREAMING_SNAKE_CASE__ = mask_time_prob
SCREAMING_SNAKE_CASE__ = mask_time_length
SCREAMING_SNAKE_CASE__ = mask_time_min_masks
SCREAMING_SNAKE_CASE__ = mask_feature_prob
SCREAMING_SNAKE_CASE__ = mask_feature_length
SCREAMING_SNAKE_CASE__ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
SCREAMING_SNAKE_CASE__ = num_codevectors_per_group
SCREAMING_SNAKE_CASE__ = num_codevector_groups
SCREAMING_SNAKE_CASE__ = contrastive_logits_temperature
SCREAMING_SNAKE_CASE__ = feat_quantizer_dropout
SCREAMING_SNAKE_CASE__ = num_negatives
SCREAMING_SNAKE_CASE__ = codevector_dim
SCREAMING_SNAKE_CASE__ = proj_codevector_dim
SCREAMING_SNAKE_CASE__ = diversity_loss_weight
# ctc loss
SCREAMING_SNAKE_CASE__ = ctc_loss_reduction
SCREAMING_SNAKE_CASE__ = ctc_zero_infinity
# pretraining loss
SCREAMING_SNAKE_CASE__ = replace_prob
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
return functools.reduce(operator.mul , self.conv_stride , 1 )
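# Illustrative note (ours): the property above is the overall stride of the feature encoder.
# With the default conv_stride of (5, 2, 2, 2, 2, 2, 2) the product is 5 * 2**6 = 320,
# i.e. one encoder frame per 320 raw audio samples.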
| 616
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ):
'''simple docstring'''
lowercase_ = KandinskyVaaPipeline
lowercase_ = [
"""image_embeds""",
"""negative_image_embeds""",
]
lowercase_ = ["""image_embeds""", """negative_image_embeds"""]
lowercase_ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
lowercase_ = False
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
return 3_2
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
return 3_2
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
return 1_0_0
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__A ={
'''in_channels''': 4,
            # Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
__A =UNetaDConditionModel(**lowercase__ )
return model
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__A =VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.dummy_unet
__A =self.dummy_movq
__A =DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=lowercase__ , set_alpha_to_one=lowercase__ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=lowercase__ , )
__A ={
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __UpperCamelCase ( self , lowercase__ , lowercase__=0 ):
'''simple docstring'''
__A =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowercase__ ) ).to(lowercase__ )
__A =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowercase__ )
if str(lowercase__ ).startswith('''mps''' ):
__A =torch.manual_seed(lowercase__ )
else:
__A =torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
__A ={
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def __UpperCamelCase ( self ):
'''simple docstring'''
__A ='''cpu'''
__A =self.get_dummy_components()
__A =self.pipeline_class(**lowercase__ )
__A =pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
__A =pipe(**self.get_dummy_inputs(lowercase__ ) )
__A =output.images
__A =pipe(
**self.get_dummy_inputs(lowercase__ ) , return_dict=lowercase__ , )[0]
__A =image[0, -3:, -3:, -1]
__A =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__A =np.array(
[0.623_7976, 1.0, 0.3644_1332, 1.0, 0.7063_9634, 0.2987_7186, 0.8565_2125, 0.521_6843, 0.5445_4046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy''' )
__A =KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(lowercase__ )
__A =KandinskyVaaPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
__A =pipeline.to(lowercase__ )
pipeline.set_progress_bar_config(disable=lowercase__ )
__A ='''red cat, 4k photo'''
__A =torch.Generator(device='''cuda''' ).manual_seed(0 )
__A , __A =pipe_prior(
lowercase__ , generator=lowercase__ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
__A =torch.Generator(device='''cuda''' ).manual_seed(0 )
__A =pipeline(
image_embeds=lowercase__ , negative_image_embeds=lowercase__ , generator=lowercase__ , num_inference_steps=1_0_0 , output_type='''np''' , )
__A =output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(lowercase__ , lowercase__ )
| 184
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__ ( __magic_name__ ):
'''simple docstring'''
lowercase_ = ["""image_processor""", """tokenizer"""]
lowercase_ = """LayoutLMv2ImageProcessor"""
lowercase_ = ("""LayoutXLMTokenizer""", """LayoutXLMTokenizerFast""")
def __init__( self , lowercase__=None , lowercase__=None , **lowercase__ ):
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowercase__ , )
__A =kwargs.pop('''feature_extractor''' )
__A =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowercase__ , lowercase__ )
def __call__( self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = True , lowercase__ = False , lowercase__ = None , lowercase__ = None , lowercase__ = 0 , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = False , lowercase__ = False , lowercase__ = False , lowercase__ = False , lowercase__ = True , lowercase__ = None , **lowercase__ , ):
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
__A =self.image_processor(images=lowercase__ , return_tensors=lowercase__ )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowercase__ , lowercase__ ):
__A =[text] # add batch dimension (as the image processor always adds a batch dimension)
__A =features['''words''']
__A =self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=lowercase__ , add_special_tokens=lowercase__ , padding=lowercase__ , truncation=lowercase__ , max_length=lowercase__ , stride=lowercase__ , pad_to_multiple_of=lowercase__ , return_token_type_ids=lowercase__ , return_attention_mask=lowercase__ , return_overflowing_tokens=lowercase__ , return_special_tokens_mask=lowercase__ , return_offsets_mapping=lowercase__ , return_length=lowercase__ , verbose=lowercase__ , return_tensors=lowercase__ , **lowercase__ , )
# add pixel values
__A =features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
__A =self.get_overflowing_images(lowercase__ , encoded_inputs['''overflow_to_sample_mapping'''] )
__A =images
return encoded_inputs
def __UpperCamelCase ( self , lowercase__ , lowercase__ ):
'''simple docstring'''
__A =[]
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowercase__ ) != len(lowercase__ ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(lowercase__ )} and {len(lowercase__ )}''' )
return images_with_overflow
def __UpperCamelCase ( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowercase__ , **lowercase__ )
def __UpperCamelCase ( self , *lowercase__ , **lowercase__ ):
'''simple docstring'''
return self.tokenizer.decode(*lowercase__ , **lowercase__ )
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowercase__ , )
return self.image_processor_class
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowercase__ , )
return self.image_processor
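# Illustrative usage (ours): with `apply_ocr=True` on the image processor, words and boxes
# come from OCR, so a document image can be encoded directly (positional call, since the
# upstream keyword names are obfuscated here):
#   encoding = processor(document_image, return_tensors="pt")
# Pre-tokenized words instead require `apply_ocr=False` plus explicit `boxes`.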
| 184
| 1
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = None ,):
lowercase = {}
if train_file is not None:
lowercase = [train_file]
if eval_file is not None:
lowercase = [eval_file]
if test_file is not None:
lowercase = [test_file]
lowercase = datasets.load_dataset("""csv""" ,data_files=lowerCAmelCase__ )
lowercase = list(ds[list(files.keys() )[0]].features.keys() )
lowercase = features_name.pop(lowerCAmelCase__ )
lowercase = list(set(ds[list(files.keys() )[0]][label_name] ) )
lowercase = {label: i for i, label in enumerate(lowerCAmelCase__ )}
lowercase = tokenizer.model_input_names
lowercase = {}
if len(lowerCAmelCase__ ) == 1:
for k in files.keys():
lowercase = ds[k].map(
lambda lowerCAmelCase__ : tokenizer.batch_encode_plus(
example[features_name[0]] ,truncation=lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="""max_length""" ) ,batched=lowerCAmelCase__ ,)
elif len(lowerCAmelCase__ ) == 2:
for k in files.keys():
lowercase = ds[k].map(
lambda lowerCAmelCase__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) ,truncation=lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="""max_length""" ,) ,batched=lowerCAmelCase__ ,)
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
lowercase = {k: v for k, v in ex.items() if k in input_names}
lowercase = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
lowercase = {k: v for k, v in ex.items() if k in input_names}
lowercase = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
lowercase = {k: v for k, v in ex.items() if k in input_names}
lowercase = labelaid[ex[label_name]]
yield (d, label)
lowercase = (
tf.data.Dataset.from_generator(
lowerCAmelCase__ ,({k: tf.intaa for k in input_names}, tf.intaa) ,({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) ,)
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
lowercase = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
lowercase = (
tf.data.Dataset.from_generator(
lowerCAmelCase__ ,({k: tf.intaa for k in input_names}, tf.intaa) ,({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) ,)
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
lowercase = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
lowercase = (
tf.data.Dataset.from_generator(
lowerCAmelCase__ ,({k: tf.intaa for k in input_names}, tf.intaa) ,({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) ,)
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
lowercase = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
__SCREAMING_SNAKE_CASE : Union[str, Any] =logging.getLogger(__name__)
@dataclass
class A_ :
_A = field(metadata={'''help''': '''Which column contains the label'''} )
_A = field(default=__a , metadata={'''help''': '''The path of the training file'''} )
_A = field(default=__a , metadata={'''help''': '''The path of the development file'''} )
_A = field(default=__a , metadata={'''help''': '''The path of the test file'''} )
_A = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_A = field(
default=__a , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class A_ :
_A = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
_A = field(
default=__a , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_A = field(
default=__a , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_A = field(default=__a , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_A = field(
default=__a , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def UpperCamelCase__ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,level=logging.INFO ,)
logger.info(
f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
f"""16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
lowercase , lowercase , lowercase , lowercase = get_tfds(
train_file=data_args.train_file ,eval_file=data_args.dev_file ,test_file=data_args.test_file ,tokenizer=lowerCAmelCase__ ,label_column_id=data_args.label_column_id ,max_seq_length=data_args.max_seq_length ,)
lowercase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=len(lowerCAmelCase__ ) ,labelaid=lowerCAmelCase__ ,idalabel={id: label for label, id in labelaid.items()} ,finetuning_task="""text-classification""" ,cache_dir=model_args.cache_dir ,)
with training_args.strategy.scope():
lowercase = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path ,from_pt=bool(""".bin""" in model_args.model_name_or_path ) ,config=lowerCAmelCase__ ,cache_dir=model_args.cache_dir ,)
def compute_metrics(lowerCAmelCase__ ) -> Dict:
lowercase = np.argmax(p.predictions ,axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
lowercase = TFTrainer(
model=lowerCAmelCase__ ,args=lowerCAmelCase__ ,train_dataset=lowerCAmelCase__ ,eval_dataset=lowerCAmelCase__ ,compute_metrics=lowerCAmelCase__ ,)
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowercase = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowercase = trainer.evaluate()
lowercase = os.path.join(training_args.output_dir ,"""eval_results.txt""" )
with open(lowerCAmelCase__ ,"""w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
results.update(lowerCAmelCase__ )
return results
if __name__ == "__main__":
main()
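# Example invocation (file names illustrative; the label column is selected by index, and
# the flags match the dataclass fields and TFTrainingArguments used above):
#   python run_tf_text_classification.py --train_file train.csv --dev_file dev.csv \
#       --label_column_id 0 --model_name_or_path bert-base-cased --output_dir ./out \
#       --do_train --do_eval --max_seq_length 128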
| 709
|
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Any =logging.get_logger('''transformers.models.speecht5''')
__SCREAMING_SNAKE_CASE : Optional[Any] ={
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
__SCREAMING_SNAKE_CASE : Union[str, Any] ={
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
__SCREAMING_SNAKE_CASE : Optional[int] ={
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
__SCREAMING_SNAKE_CASE : List[Any] ={
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
__SCREAMING_SNAKE_CASE : List[Any] ={
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
__SCREAMING_SNAKE_CASE : Optional[Any] ={
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
__SCREAMING_SNAKE_CASE : Optional[int] ={
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
__SCREAMING_SNAKE_CASE : List[Any] ={
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
__SCREAMING_SNAKE_CASE : List[Any] ={
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
__SCREAMING_SNAKE_CASE : List[str] ={
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__SCREAMING_SNAKE_CASE : Optional[int] ={
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__SCREAMING_SNAKE_CASE : Dict =[]
__SCREAMING_SNAKE_CASE : List[str] =[
'''encoder.version''',
'''encoder.layers.*.norm_k.weight''',
'''encoder.layers.*.norm_k.bias''',
'''decoder.version''',
'''decoder.layers.*.norm_k.weight''',
'''decoder.layers.*.norm_k.bias''',
'''decoder.pos_emb.pe_k''',
'''speech_encoder_prenet.embed_positions._float_tensor''',
'''text_decoder_prenet.embed_positions._float_tensor''',
]
__SCREAMING_SNAKE_CASE : List[str] =IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''speech_decoder_prenet.*''',
'''speech_decoder_postnet.*''',
]
__SCREAMING_SNAKE_CASE : Any =IGNORE_KEYS + [
'''encoder.proj''',
'''speech_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
__SCREAMING_SNAKE_CASE : Any =IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
for attribute in key.split(""".""" ):
lowercase = getattr(lowerCAmelCase__ ,lowerCAmelCase__ )
if weight_type is not None:
lowercase = getattr(lowerCAmelCase__ ,lowerCAmelCase__ ).shape
else:
lowercase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
lowercase = value
elif weight_type == "weight_g":
lowercase = value
elif weight_type == "weight_v":
lowercase = value
elif weight_type == "bias":
lowercase = value
elif weight_type == "running_mean":
lowercase = value
elif weight_type == "running_var":
lowercase = value
elif weight_type == "num_batches_tracked":
lowercase = value
else:
lowercase = value
logger.info(f"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" )
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ):
for key in ignore_keys:
if key.endswith(""".*""" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
lowercase , lowercase = key.split(""".*.""" )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowercase = []
if task == "s2t":
lowercase = hf_model.speechta.encoder.prenet.feature_encoder
lowercase = MAPPING_S2T
lowercase = IGNORE_KEYS_S2T
elif task == "t2s":
lowercase = None
lowercase = MAPPING_T2S
lowercase = IGNORE_KEYS_T2S
elif task == "s2s":
lowercase = hf_model.speechta.encoder.prenet.feature_encoder
lowercase = MAPPING_S2S
lowercase = IGNORE_KEYS_S2S
else:
raise ValueError(f"""Unsupported task: {task}""" )
for name, value in fairseq_dict.items():
if should_ignore(lowerCAmelCase__ ,lowerCAmelCase__ ):
logger.info(f"""{name} was ignored""" )
continue
lowercase = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,hf_model.config.feat_extract_norm == """group""" ,)
lowercase = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
lowercase , lowercase = key.split(""".*.""" )
if prefix in name and suffix in name:
lowercase = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
lowercase = True
if "*" in mapped_key:
lowercase = name.split(lowerCAmelCase__ )[0].split(""".""" )[-2]
lowercase = mapped_key.replace("""*""" ,lowerCAmelCase__ )
if "weight_g" in name:
lowercase = """weight_g"""
elif "weight_v" in name:
lowercase = """weight_v"""
elif "bias" in name:
lowercase = """bias"""
elif "weight" in name:
lowercase = """weight"""
elif "running_mean" in name:
lowercase = """running_mean"""
elif "running_var" in name:
lowercase = """running_var"""
elif "num_batches_tracked" in name:
lowercase = """num_batches_tracked"""
else:
lowercase = None
set_recursively(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
continue
if not is_used:
unused_weights.append(lowerCAmelCase__ )
logger.warning(f"""Unused weights: {unused_weights}""" )
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowercase = full_name.split("""conv_layers.""" )[-1]
lowercase = name.split(""".""" )
lowercase = int(items[0] )
lowercase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowercase = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowercase = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
lowercase = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
lowercase = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowerCAmelCase__ )
@torch.no_grad()
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=None ,lowerCAmelCase__=None ,):
if config_path is not None:
lowercase = SpeechTaConfig.from_pretrained(lowerCAmelCase__ )
else:
lowercase = SpeechTaConfig()
if task == "s2t":
lowercase = config.max_text_positions
lowercase = SpeechTaForSpeechToText(lowerCAmelCase__ )
elif task == "t2s":
lowercase = 1_876
lowercase = 600
lowercase = config.max_speech_positions
lowercase = SpeechTaForTextToSpeech(lowerCAmelCase__ )
elif task == "s2s":
lowercase = 1_876
lowercase = config.max_speech_positions
lowercase = SpeechTaForSpeechToSpeech(lowerCAmelCase__ )
else:
raise ValueError(f"""Unknown task name: {task}""" )
if vocab_path:
lowercase = SpeechTaTokenizer(lowerCAmelCase__ ,model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
lowercase = AddedToken("""<mask>""" ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ )
lowercase = mask_token
tokenizer.add_special_tokens({"""mask_token""": mask_token} )
tokenizer.add_tokens(["""<ctc_blank>"""] )
lowercase = SpeechTaFeatureExtractor()
lowercase = SpeechTaProcessor(tokenizer=lowerCAmelCase__ ,feature_extractor=lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
lowercase = torch.load(lowerCAmelCase__ )
recursively_load_weights(fairseq_checkpoint["""model"""] ,lowerCAmelCase__ ,lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
if repo_id:
print("""Pushing to the hub...""" )
processor.push_to_hub(lowerCAmelCase__ )
model.push_to_hub(lowerCAmelCase__ )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[Any] =argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
__SCREAMING_SNAKE_CASE : Optional[Any] =parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
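# Example invocation (paths illustrative; flags match the argparse definitions above):
#   python convert_speecht5_checkpoint.py --task t2s --checkpoint_path speecht5_tts.pt \
#       --vocab_path spm_char.model --pytorch_dump_folder_path ./speecht5_tts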
| 72
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : int = logging.get_logger(__name__)
lowerCamelCase : Union[str, Any] = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = '''sew'''
def __init__( self : int , A_ : Optional[Any]=32 , A_ : str=768 , A_ : Any=12 , A_ : Optional[Any]=12 , A_ : str=3072 , A_ : Union[str, Any]=2 , A_ : Union[str, Any]="gelu" , A_ : Dict=0.1 , A_ : Optional[int]=0.1 , A_ : Optional[int]=0.1 , A_ : List[str]=0.0 , A_ : List[str]=0.1 , A_ : int=0.1 , A_ : Any=0.02 , A_ : Tuple=1E-5 , A_ : Optional[Any]="group" , A_ : Union[str, Any]="gelu" , A_ : List[Any]=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , A_ : Dict=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , A_ : List[str]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , A_ : str=False , A_ : int=128 , A_ : Optional[Any]=16 , A_ : List[Any]=True , A_ : List[str]=0.05 , A_ : List[str]=10 , A_ : int=2 , A_ : Union[str, Any]=0.0 , A_ : List[Any]=10 , A_ : Dict=0 , A_ : List[str]="mean" , A_ : Optional[Any]=False , A_ : Union[str, Any]=False , A_ : Optional[int]=256 , A_ : Optional[Any]=0 , A_ : List[Any]=1 , A_ : Optional[int]=2 , **A_ : Tuple , ) -> List[Any]:
"""simple docstring"""
super().__init__(**A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ )
lowerCamelCase_ = hidden_size
lowerCamelCase_ = feat_extract_norm
lowerCamelCase_ = feat_extract_activation
lowerCamelCase_ = list(A_ )
lowerCamelCase_ = list(A_ )
lowerCamelCase_ = list(A_ )
lowerCamelCase_ = conv_bias
lowerCamelCase_ = num_conv_pos_embeddings
lowerCamelCase_ = num_conv_pos_embedding_groups
lowerCamelCase_ = len(self.conv_dim )
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = squeeze_factor
lowerCamelCase_ = hidden_act
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = hidden_dropout
lowerCamelCase_ = attention_dropout
lowerCamelCase_ = activation_dropout
lowerCamelCase_ = feat_proj_dropout
lowerCamelCase_ = final_dropout
lowerCamelCase_ = layerdrop
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = initializer_range
lowerCamelCase_ = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
                'Configuration for convolutional layers is incorrect. '
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase_ = apply_spec_augment
lowerCamelCase_ = mask_time_prob
lowerCamelCase_ = mask_time_length
lowerCamelCase_ = mask_time_min_masks
lowerCamelCase_ = mask_feature_prob
lowerCamelCase_ = mask_feature_length
lowerCamelCase_ = mask_feature_min_masks
# ctc loss
lowerCamelCase_ = ctc_loss_reduction
lowerCamelCase_ = ctc_zero_infinity
# sequence classification
lowerCamelCase_ = use_weighted_layer_sum
lowerCamelCase_ = classifier_proj_size
@property
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
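# Illustrative note (ours): the default conv_stride (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
# again multiplies out to 5 * 2**6 = 320 samples per frame. On top of that, SEW squeezes
# the encoder's time axis by `squeeze_factor` (default 2) before the transformer layers
# and upsamples it back afterwards.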
| 70
|
def lowerCamelCase__ ( _A , _A ):
'''simple docstring'''
_validate_point(_A )
_validate_point(_A )
if len(_A ) != len(_A ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(_A , _A ) ) )
def lowerCamelCase__ ( _A ):
'''simple docstring'''
if point:
if isinstance(_A , _A ):
for item in point:
if not isinstance(_A , (int, float) ):
snake_case_ = (
"Expected a list of numbers as input, found "
f"{type(_A ).__name__}"
)
raise TypeError(_A )
else:
snake_case_ = f"Expected a list of numbers as input, found {type(_A ).__name__}"
raise TypeError(_A )
else:
raise ValueError("Missing an input" )
def lowerCamelCase__ ( _A , _A ):
'''simple docstring'''
_validate_point(_A )
_validate_point(_A )
if len(_A ) != len(_A ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(_A , _A ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
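# Example (ours): both helpers compute the Manhattan (L1) distance, the second being a
# one-liner variant of the first; for [1, 1] and [4, 5] the result is
# abs(1 - 4) + abs(1 - 5) = 7.0.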
| 376
| 0
|
'''simple docstring'''
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def UpperCAmelCase ( a_ , a_ , **a_ ) -> Dict:
"""simple docstring"""
A_ : Tuple = AutoConfig.from_pretrained(a_ , **a_ )
A_ : int = AutoModelForSeqaSeqLM.from_config(a_ )
model.save_pretrained(a_ )
AutoTokenizer.from_pretrained(a_ ).save_pretrained(a_ )
return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
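# Example invocation (names illustrative; trailing flags are forwarded by fire as config
# overrides to AutoConfig.from_pretrained):
#   python save_randomly_initialized.py t5-small ./t5-random-small --d_model 64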
| 705
|
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
UpperCamelCase__ : Optional[int] = False
UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCamelCase__ : Any = 'ybelkada/fonts'
def UpperCAmelCase ( ) -> List[Any]:
"""simple docstring"""
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
"""Pix2StructImageProcessor. Please upgrade torch.""" )
def UpperCAmelCase ( a_ , a_ , a_ ) -> Tuple:
"""simple docstring"""
requires_backends(a_ , ["""torch"""] )
_check_torch_version()
A_ : List[Any] = image_tensor.unsqueeze(0 )
A_ : str = torch.nn.functional.unfold(a_ , (patch_height, patch_width) , stride=(patch_height, patch_width) )
A_ : int = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , a_ , a_ , -1 )
A_ : Dict = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
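# Illustrative shape walk-through (ours): for a (3, 32, 32) image with 16x16 patches,
# unfold yields (1, 3*16*16, 4) = (1, 768, 4); after the reshape and permute the result is
# (rows, cols, C*ph*pw) = (2, 2, 768), returned with a leading batch axis as (1, 2, 2, 768).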
def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
) -> Image.Image:
    """Render ``text`` onto a new PIL image."""
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image: np.ndarray, header: str, **kwargs) -> np.ndarray:
    """Render ``header`` as a text banner and paste it above ``image``."""
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image
class Pix2StructImageProcessor( BaseImageProcessor ):
    """simple docstring"""

    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Optional[Dict[str, int]] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result
    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
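        # Hedged note: this mirrors tf.image.per_image_standardization. The
        # image is shifted to zero mean and scaled to unit variance, with the
        # std floored at 1/sqrt(num_pixels) so constant images do not divide
        # by (nearly) zero.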
    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: Optional[bool] = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are always flattened patches.")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)
            if isinstance(header_text, str):
                header_text = [header_text] * len(images)
            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
| 385
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig( PretrainedConfig ):
    """simple docstring"""

    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads

        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
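        # Hedged usage sketch: a value that passes this validation looks like
        #   rope_scaling={"type": "linear", "factor": 2.0}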
| 282
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 282
| 1
|
'''simple docstring'''
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    """Returns the numerator of the fraction immediately to the left of numerator/denominator with denominator <= limit."""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_00_00_00))
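    # Hedged check: for denominators up to 8 the fraction immediately to the
    # left of 3/7 is 2/5, so the returned numerator is 2.
    assert solution(limit=8) == 2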
| 644
|
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
snake_case_ : List[Any] = pd.read_csv("sample_data.csv", header=None)
snake_case_ : Optional[Any] = df.shape[:1][0]
# If you're using some other dataset input the target column
snake_case_ : Any = df.iloc[:, 1:2]
snake_case_ : str = actual_data.values.reshape(len_data, 1)
snake_case_ : Optional[Any] = MinMaxScaler().fit_transform(actual_data)
snake_case_ : List[str] = 10
snake_case_ : Any = 5
snake_case_ : Any = 20
snake_case_ : Tuple = len_data - periods * look_back
snake_case_ : str = actual_data[:division]
snake_case_ : Optional[int] = actual_data[division - look_back :]
snake_case_ ,snake_case_ : Any = [], []
snake_case_ ,snake_case_ : Union[str, Any] = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
snake_case_ : Any = np.array(train_x)
snake_case_ : Optional[Any] = np.array(test_x)
snake_case_ : Optional[Any] = np.array([list(i.ravel()) for i in train_y])
snake_case_ : List[str] = np.array([list(i.ravel()) for i in test_y])
snake_case_ : List[Any] = Sequential()
model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(1_28, 1)))
model.add(Dense(forward_days))
model.compile(loss="mean_squared_error", optimizer="adam")
snake_case_ : Dict = model.fit(
x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4
)
snake_case_ : Optional[Any] = model.predict(x_test)
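    # Hedged follow-up sketch: predictions live in the scaled [0, 1] space,
    # so map them back to the original units before plotting or scoring.
    pred_rescaled = scaler.inverse_transform(pred.reshape(-1, 1))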
| 644
| 1
|
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}. You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=3, every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True,
    )
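# Hedged usage sketch (argument values are illustrative):
#   checkpoint = get_checkpoint_callback(output_dir="out", metric="rouge2")
#   early_stop = get_early_stopping_callback(metric="rouge2", patience=3)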
class UpperCamelCase_ (pl.Callback ):
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict ) -> str:
UpperCAmelCase_ : Dict = {f"""lr_group_{i}""": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(lowerCAmelCase_ )
@rank_zero_only
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : pl.Trainer , lowerCAmelCase_ : pl.LightningModule , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict=True ) -> None:
logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
UpperCAmelCase_ : Optional[Any] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
UpperCAmelCase_ : int = Path(pl_module.hparams.output_dir )
if type_path == "test":
UpperCAmelCase_ : List[str] = od / "test_results.txt"
UpperCAmelCase_ : Any = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
UpperCAmelCase_ : Union[str, Any] = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
UpperCAmelCase_ : List[str] = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=lowerCAmelCase_ )
generations_file.parent.mkdir(exist_ok=lowerCAmelCase_ )
with open(lowerCAmelCase_ , "a+" ) as writer:
for key in sorted(lowerCAmelCase_ ):
if key in ["log", "progress_bar", "preds"]:
continue
UpperCAmelCase_ : Any = metrics[key]
if isinstance(lowerCAmelCase_ , torch.Tensor ):
UpperCAmelCase_ : Optional[Any] = val.item()
UpperCAmelCase_ : str = f"""{key}: {val:.6f}\n"""
writer.write(lowerCAmelCase_ )
if not save_generations:
return
if "preds" in metrics:
UpperCAmelCase_ : str = "\n".join(metrics["preds"] )
generations_file.open("w+" ).write(lowerCAmelCase_ )
@rank_zero_only
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] ) -> Dict:
try:
UpperCAmelCase_ : Optional[Any] = pl_module.model.model.num_parameters()
except AttributeError:
UpperCAmelCase_ : Optional[Any] = pl_module.model.num_parameters()
UpperCAmelCase_ : Tuple = count_trainable_parameters(lowerCAmelCase_ )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} )
@rank_zero_only
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : pl.Trainer , lowerCAmelCase_ : pl.LightningModule ) -> Optional[int]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(lowerCAmelCase_ , lowerCAmelCase_ , "test" )
@rank_zero_only
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : pl.Trainer , lowerCAmelCase_ : List[Any] ) -> List[Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 95
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 639
| 0
|
'''simple docstring'''
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Returns the count of n-digit positive integers that are also an nth power."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
if __name__ == "__main__":
print(F'''{solution(1_0, 2_2) = }''')
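    # Hedged check: the classic result for bases below 10 and powers below 22
    # is 49 (e.g. 16807 = 7**5 is itself a 5-digit number).
    assert solution() == 49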
| 694
|
'''simple docstring'''
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theorem: a sign change guarantees a root between a and b
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
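    # Hedged check: equation(x) = 10 - x * x has its positive root at
    # sqrt(10) ~ 3.1623, so both calls above converge near that value.
    assert abs(bisection(-2, 5) - 10**0.5) < 0.02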
| 694
| 1
|
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Cleans the model documentation table of content: removes duplicates and sorts models alphabetically."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicated keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
__UpperCamelCase : int = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__UpperCamelCase : List[Any] = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 450
|
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
_SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args()
_SCREAMING_SNAKE_CASE : str = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 493
| 0
|
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    """Configuration for the dataset download manager."""

    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
| 158
|
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self :Optional[int] , __magic_name__ :Dict , __magic_name__ :List[str]=13 , __magic_name__ :Tuple=7 , __magic_name__ :Optional[Any]=True , __magic_name__ :str=True , __magic_name__ :Optional[Any]=True , __magic_name__ :int=True , __magic_name__ :Optional[Any]=99 , __magic_name__ :Optional[int]=32 , __magic_name__ :str=5 , __magic_name__ :List[Any]=4 , __magic_name__ :str=37 , __magic_name__ :List[str]="gelu" , __magic_name__ :str=0.1 , __magic_name__ :Dict=0.1 , __magic_name__ :Any=512 , __magic_name__ :int=16 , __magic_name__ :Tuple=2 , __magic_name__ :Optional[Any]=0.02 , __magic_name__ :List[str]=False , __magic_name__ :List[Any]=True , __magic_name__ :List[Any]="None" , __magic_name__ :str=3 , __magic_name__ :Optional[int]=4 , __magic_name__ :Dict=None , ) -> Union[str, Any]:
'''simple docstring'''
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_input_mask
a__ = use_token_type_ids
a__ = use_labels
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = type_sequence_label_size
a__ = initializer_range
a__ = num_labels
a__ = num_choices
a__ = relative_attention
a__ = position_biased_input
a__ = pos_att_type
a__ = scope
def _UpperCamelCase ( self :Optional[int] ) -> Any:
'''simple docstring'''
a__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a__ = None
if self.use_input_mask:
a__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
a__ = None
if self.use_token_type_ids:
a__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a__ = None
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a__ = ids_tensor([self.batch_size] , self.num_choices )
a__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def _UpperCamelCase ( self :int ) -> str:
'''simple docstring'''
a__ = self.get_config()
a__ = 300
return config
def _UpperCamelCase ( self :str , __magic_name__ :str ) -> List[Any]:
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def _UpperCamelCase ( self :Optional[int] , __magic_name__ :Union[str, Any] , __magic_name__ :Optional[Any] , __magic_name__ :Optional[int] , __magic_name__ :Union[str, Any] , __magic_name__ :Dict , __magic_name__ :Tuple , __magic_name__ :Optional[Any] ) -> Tuple:
'''simple docstring'''
a__ = DebertaModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
a__ = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )[0]
a__ = model(__magic_name__ , token_type_ids=__magic_name__ )[0]
a__ = model(__magic_name__ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def _UpperCamelCase ( self :Optional[Any] , __magic_name__ :Tuple , __magic_name__ :Any , __magic_name__ :int , __magic_name__ :List[Any] , __magic_name__ :List[Any] , __magic_name__ :int , __magic_name__ :Union[str, Any] ) -> str:
'''simple docstring'''
a__ = DebertaForMaskedLM(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
a__ = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self :Any , __magic_name__ :Optional[int] , __magic_name__ :str , __magic_name__ :str , __magic_name__ :List[Any] , __magic_name__ :Union[str, Any] , __magic_name__ :Optional[Any] , __magic_name__ :Tuple ) -> str:
'''simple docstring'''
a__ = self.num_labels
a__ = DebertaForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
a__ = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__magic_name__ )
def _UpperCamelCase ( self :int , __magic_name__ :Tuple , __magic_name__ :Dict , __magic_name__ :Any , __magic_name__ :Optional[int] , __magic_name__ :Union[str, Any] , __magic_name__ :Optional[int] , __magic_name__ :Optional[Any] ) -> List[str]:
'''simple docstring'''
a__ = self.num_labels
a__ = DebertaForTokenClassification(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
a__ = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self :str , __magic_name__ :Dict , __magic_name__ :Any , __magic_name__ :Tuple , __magic_name__ :int , __magic_name__ :Union[str, Any] , __magic_name__ :str , __magic_name__ :Tuple ) -> Tuple:
'''simple docstring'''
a__ = DebertaForQuestionAnswering(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
a__ = model(
__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
a__ = self.prepare_config_and_inputs()
(
(
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) ,
) = config_and_inputs
a__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
snake_case__ : int = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
snake_case__ : Optional[Any] = (
{
'feature-extraction': DebertaModel,
'fill-mask': DebertaForMaskedLM,
'question-answering': DebertaForQuestionAnswering,
'text-classification': DebertaForSequenceClassification,
'token-classification': DebertaForTokenClassification,
'zero-shot': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : int = True
snake_case__ : Any = False
snake_case__ : List[str] = False
snake_case__ : Any = False
snake_case__ : Dict = False
def _UpperCamelCase ( self :str ) -> Union[str, Any]:
'''simple docstring'''
a__ = DebertaModelTester(self )
a__ = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def _UpperCamelCase ( self :Optional[int] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCamelCase ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__magic_name__ )
def _UpperCamelCase ( self :Any ) -> Any:
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__magic_name__ )
def _UpperCamelCase ( self :Optional[int] ) -> Any:
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__magic_name__ )
def _UpperCamelCase ( self :Any ) -> Optional[int]:
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__magic_name__ )
def _UpperCamelCase ( self :str ) -> int:
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__magic_name__ )
@slow
def _UpperCamelCase ( self :List[Any] ) -> Optional[Any]:
'''simple docstring'''
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = DebertaModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason='''Model not available yet''' )
def _UpperCamelCase ( self :Union[str, Any] ) -> int:
'''simple docstring'''
pass
@slow
def _UpperCamelCase ( self :Tuple ) -> str:
'''simple docstring'''
a__ = DebertaModel.from_pretrained('''microsoft/deberta-base''' )
a__ = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
a__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
a__ = model(__magic_name__ , attention_mask=__magic_name__ )[0]
# compare the actual values for a slice.
a__ = torch.tensor(
[[[-0.5_986, -0.8_055, -0.8_462], [1.4_484, -0.9_348, -0.8_059], [0.3_123, 0.0_032, -1.4_131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __magic_name__ , atol=1e-4 ) , F"{output[:, 1:4, 1:4]}" )
| 158
| 1
|
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
| 154
|
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
__SCREAMING_SNAKE_CASE : List[str] = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
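    # Hedged round-trip check: "SOS" encodes to "... --- ..." and back.
    assert decrypt(encrypt("SOS")) == "SOS"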
| 661
| 0
|
"""simple docstring"""
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }
        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
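        # Hedged note: token2json inverts Donut's json2token serialization;
        # nested <s_key>...</s_key> spans become dict keys and <sep/> separates
        # repeated list items.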
| 327
|
"""simple docstring"""
from collections import defaultdict
from math import gcd
def solution(limit: int = 1500000) -> int:
    """Counts perimeters <= limit that form exactly one integer right triangle."""
    frequencies = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(f'''{solution() = }''')
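    # Hedged check (Project Euler #75): up to a wire length of 48, exactly six
    # perimeters (12, 24, 30, 36, 40, 48) form a single integer right triangle.
    assert solution(48) == 6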
| 327
| 1
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            current_node.prev = previous_node
            previous_node.next = current_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception('Empty Queue')

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception('Full Queue')


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
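    # Hedged usage sketch of the queue above:
    queue = CircularQueueLinkedList(initial_capacity=2)
    queue.enqueue("a")
    assert queue.first() == "a"
    assert queue.dequeue() == "a"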
| 476
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
lowercase__ =version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def UpperCamelCase_ ( A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__=False , ):
output_path.parent.mkdir(parents=A__ , exist_ok=A__ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
A__ , A__ , f=output_path.as_posix() , input_names=A__ , output_names=A__ , dynamic_axes=A__ , do_constant_folding=A__ , use_external_data_format=A__ , enable_onnx_checker=A__ , opset_version=A__ , )
else:
export(
A__ , A__ , f=output_path.as_posix() , input_names=A__ , output_names=A__ , dynamic_axes=A__ , do_constant_folding=A__ , opset_version=A__ , )
@torch.no_grad()
def UpperCamelCase_ ( A__ , A__ , A__ , A__ = False ):
a_ = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
a_ = """cuda"""
elif fpaa and not torch.cuda.is_available():
raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" )
else:
a_ = """cpu"""
a_ = Path(A__ )
# VAE DECODER
a_ = AutoencoderKL.from_pretrained(model_path + """/vae""" )
a_ = vae_decoder.config.latent_channels
# forward only through the decoder part
a_ = vae_decoder.decode
onnx_export(
A__ , model_args=(
torch.randn(1 , A__ , 25 , 25 ).to(device=A__ , dtype=A__ ),
False,
) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={
"""latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=A__ , )
del vae_decoder
if __name__ == "__main__":
lowercase__ =argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
lowercase__ =parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print('SD: Done: ONNX')
| 263
| 0
|
"""simple docstring"""
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """Bezier curve defined by a list of 2-D control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree)
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
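    # Hedged check: a degree-1 curve is the straight segment between its two
    # control points, so t = 0.5 lands exactly on the midpoint.
    assert BezierCurve([(1, 1), (3, 3)]).bezier_curve_function(0.5) == (2.0, 2.0)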
| 327
|
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def lowercase () -> List[Any]:
raise RuntimeError('CUDA out of memory.' )
class lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self ) -> Optional[int]:
super().__init__()
SCREAMING_SNAKE_CASE = nn.Linear(3 , 4 )
SCREAMING_SNAKE_CASE = nn.BatchNormad(4 )
SCREAMING_SNAKE_CASE = nn.Linear(4 , 5 )
def __A ( self , lowerCAmelCase__ ) -> Union[str, Any]:
return self.lineara(self.batchnorm(self.lineara(lowerCAmelCase__ ) ) )
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(lowerCAmelCase__ ):
nonlocal batch_sizes
batch_sizes.append(lowerCAmelCase__ )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(lowerCAmelCase__ , [128, 64, 32, 16, 8] )
def __A ( self ) -> str:
SCREAMING_SNAKE_CASE = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(lowerCAmelCase__ , lowerCAmelCase__ ):
nonlocal batch_sizes
batch_sizes.append(lowerCAmelCase__ )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = mock_training_loop_function('hello' )
self.assertListEqual(lowerCAmelCase__ , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, 'hello'] )
def __A ( self ) -> Optional[Any]:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(lowerCAmelCase__ ):
pass
with self.assertRaises(lowerCAmelCase__ ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def __A ( self ) -> List[Any]:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowerCAmelCase__ ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(lowerCAmelCase__ ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def __A ( self ) -> str:
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(lowerCAmelCase__ ) as cm:
mock_training_loop_function(128 , 'hello' , 'world' )
self.assertIn('Batch size was passed into `f`' , cm.exception.args[0] )
self.assertIn('`f(arg1=\'hello\', arg2=\'world\')' , cm.exception.args[0] )
def __A ( self ) -> Optional[int]:
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(lowerCAmelCase__ ):
raise ValueError('Oops, we had an error!' )
with self.assertRaises(lowerCAmelCase__ ) as cm:
mock_training_loop_function()
self.assertIn('Oops, we had an error!' , cm.exception.args[0] )
@require_cuda
def __A ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE = torch.cuda.memory_allocated()
SCREAMING_SNAKE_CASE = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = release_memory(lowerCAmelCase__ )
self.assertEqual(torch.cuda.memory_allocated() , lowerCAmelCase__ )
| 327
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""uclanlp/visualbert-vqa""": """https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-pre""": """https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-vcr""": """https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-pre""": """https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-nlvr2""": """https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-pre""": """https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"""
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, visual_embedding_dim=512, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, bypass_transformer=False, special_visual_initialize=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
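# Illustrative usage sketch (not part of the original module): the config
# follows the standard `PretrainedConfig` save/load workflow.
if __name__ == "__main__":
    config = VisualBertConfig(visual_embedding_dim=1024)
    config.save_pretrained("./visual_bert_demo")  # writes config.json
    reloaded = VisualBertConfig.from_pretrained("./visual_bert_demo")
    assert reloaded.visual_embedding_dim == 1024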
| 583
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model
    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=3,
        )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModel(config)
    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
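# Illustrative usage sketch (not part of the original tests): running the same
# checkpoint outside the test harness. Kept commented out because it downloads
# the CompVis/ldm-celebahq-256 weights.
#
# pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
# pipe.to(torch_device)
# image = pipe(generator=torch.manual_seed(0), num_inference_steps=50).images[0]
# image.save("ldm_generated.png")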
| 358
| 0
|
"""simple docstring"""
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
"""\
name: \"\"
allow_empty: false
allow_empty_text: true
subsections:
- name: \"Dataset Card for X\" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: \"Table of Contents\"
allow_empty: false
allow_empty_text: false
subsections: null
- name: \"Dataset Description\"
allow_empty: false
allow_empty_text: false
subsections:
- name: \"Dataset Summary\"
allow_empty: false
allow_empty_text: false
subsections: null
- name: \"Supported Tasks and Leaderboards\"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
"""
)
CORRECT_DICT = {
"""name""": """root""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{
"""name""": """Dataset Card for My Dataset""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []},
{
"""name""": """Dataset Description""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Dataset Summary""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [],
},
{
"""name""": """Supported Tasks and Leaderboards""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
},
{"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []},
],
},
],
}
],
}
README_CORRECT = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
README_CORRECT_FOUR_LEVEL = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
CORRECT_DICT_FOUR_LEVEL = {
"""name""": """root""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{
"""name""": """Dataset Card for My Dataset""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []},
{
"""name""": """Dataset Description""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Dataset Summary""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Extra Ignored Subsection""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
}
],
},
{
"""name""": """Supported Tasks and Leaderboards""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
},
{"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []},
],
},
],
}
],
}
README_EMPTY_YAML = """\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_EMPTY_YAML = (
    """The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."""
)
README_NO_YAML = """\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_NO_YAML = (
    """The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."""
)
README_INCORRECT_YAML = """\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_INCORRECT_YAML = """The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."""
README_MISSING_TEXT = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_MISSING_TEXT = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."""
README_NONE_SUBSECTION = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
"""
EXPECTED_ERROR_README_NONE_SUBSECTION = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."""
README_MISSING_SUBSECTION = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
"""
EXPECTED_ERROR_README_MISSING_SUBSECTION = """The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."""
README_MISSING_CONTENT = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
"""
EXPECTED_ERROR_README_MISSING_CONTENT = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."""
README_MISSING_FIRST_LEVEL = """\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."""
README_MULTIPLE_WRONG_FIRST_LEVEL = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
"""
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = """The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."""
README_WRONG_FIRST_LEVEL = """\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = """The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."""
README_EMPTY = ""
EXPECTED_ERROR_README_EMPTY = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."""
README_MULTIPLE_SAME_HEADING_1 = """\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = """The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."""
@pytest.mark.parametrize(
"readme_md, expected_dict" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path="root"))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
"readme_md," , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
"readme_md, expected_dict" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_validation_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
"readme_md, expected_error" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
"readme_md," , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / "README.md"
        with open(path, "w+") as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
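# Illustrative usage sketch (not part of the original tests): validating an
# in-memory dataset card against the structure defined at the top of this file.
if __name__ == "__main__":
    readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
    readme.validate()  # raises ValueError listing any issues found
    print(readme.to_dict()["subsections"][0]["name"])  # Dataset Card for My Dataset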
| 716
|
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]
def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs
def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)
    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)
    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))
    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))
    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
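# Illustrative sketch (hypothetical, not part of the original file): a concrete
# test reuses the mixin by assigning `self.tool` in setUp. `load_tool` is the
# transformers helper for fetching a default tool by task name; kept commented
# out because instantiating a tool may download weights.
#
# import unittest
# from transformers import load_tool
#
# class TranslationToolTest(ToolTesterMixin, unittest.TestCase):
#     def setUp(self):
#         self.tool = load_tool("translation")
#         self.tool.setup()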
| 614
| 0
|
'''simple docstring'''
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """Custom SentencePiece Unigram tokenizer with NMT, NFKC, whitespace and lower-casing normalization."""

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress)

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 8000, show_progress: bool = True):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress)

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
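# Illustrative usage sketch (not part of the original module): training the
# tokenizer on a plain-text corpus. "corpus.txt" is a hypothetical file path.
if __name__ == "__main__":
    tokenizer = SentencePieceUnigramTokenizer()
    tokenizer.train("corpus.txt", vocab_size=8000)
    print(tokenizer.encode("Hello, world!").tokens)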
| 374
|
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce TensorFlow logging noise
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 374
| 1
|
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
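# Illustrative usage sketch (not part of the original module): resolve the
# pinned range for one dependency and fail with an actionable hint.
if __name__ == "__main__":
    dep_version_check("tqdm", hint="pip install -U tqdm")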
| 65
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 65
| 1
|
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples
    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset, batch_size=self.args.eval_batch_size, collate_fn=self.data_collator, drop_last=self.args.dataloader_drop_last, num_workers=self.args.dataloader_num_workers, pin_memory=self.args.dataloader_pin_memory, shuffle=True,
        )
    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")
        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader, description="Evaluation", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader, description="Prediction", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)

        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save, input_tuple, output_model_file, export_params=True, opset_version=13, do_constant_folding=True, input_names=["input_ids", "attention_mask", "token_type_ids"], output_names=["output_start_logits", "output_end_logits"], dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            }, verbose=True,
        )
        logger.info("onnx export finished")
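# Illustrative wiring sketch (hypothetical names, mirroring the
# quantization-qdqbert example scripts; not part of the original class):
#
# trainer = QuestionAnsweringTrainer(
#     model=model,
#     args=training_args,
#     train_dataset=train_dataset,
#     eval_dataset=eval_dataset,
#     eval_examples=eval_examples,
#     post_process_function=post_processing_function,
#     quant_trainer_args=quant_trainer_args,
# )
# trainer.calibrate()           # collect activation ranges on a few batches
# metrics = trainer.evaluate()  # evaluate the calibrated model
# trainer.save_onnx("./onnx")   # export with fake-quant nodes for TensorRT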
| 15
|
'''simple docstring'''
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length using `secrets`."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))
def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)
# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...
def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters
def main() -> None:
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:", alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, You better save it.]")
if __name__ == "__main__":
main()
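    # Illustrative extra check (not in the original script): generated
    # passwords can be vetted with is_strong_password().
    print("Strong password?", is_strong_password(password_generator(12)))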
| 42
| 0
|
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    x: int
    y: str


class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)

        sn1 = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn1_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn1_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn1, map_numpy=False, num_proc=num_proc), expected_map_nested_sn1_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn1, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn1_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn1, num_proc=num_proc)
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)
    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_temp_seed_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
    @require_torch
    def test_temp_seed_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
    def test_temp_seed_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data" , [{}] )
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" , [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it is yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
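# Illustrative usage sketch (not part of the original tests): the two utilities
# exercised most above, outside of pytest.
if __name__ == "__main__":
    print(map_nested(add_one, {"a": [1, 2], "b": {"c": 3}}))  # {'a': [2, 3], 'b': {'c': 4}}
    with temp_seed(42):
        first = np.random.rand(3)
    with temp_seed(42):
        second = np.random.rand(3)
    assert (first == second).all()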
| 32
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, solver_order=order, solver_type=solver_type,
                        )
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(solver_order=order, solver_type=solver_type, prediction_type=prediction_type)
                    sample = self.full_loop(solver_order=order, solver_type=solver_type, prediction_type=prediction_type)
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3
    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
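# Illustrative usage sketch (not part of the original tests): UniPC is usually
# consumed by swapping it into a pipeline; the checkpoint name below is just
# one example. Kept commented out because it downloads weights.
#
# from diffusers import DiffusionPipeline
#
# pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
# image = pipe("an astronaut riding a horse", num_inference_steps=20).images[0]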
| 32
| 1
|
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('''.''')
def get_module_path(test_file):
    """Return the dotted module path for a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )

    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)

    return test_module_path
def get_test_module(test_file):
    """Import and return the module defined in `test_file`."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)

    return test_module
def get_tester_classes(test_file):
    """Collect all model tester classes defined in `test_file`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = []
SCREAMING_SNAKE_CASE_ : Optional[int] = get_test_module(lowerCamelCase_ )
for attr in dir(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : List[str] = getattr(lowerCamelCase_ , lowerCamelCase_ )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
SCREAMING_SNAKE_CASE_ : List[Any] = getattr(lowerCamelCase_ , 'all_model_classes' , [] )
if len(lowerCamelCase_ ) > 0:
test_classes.append(lowerCamelCase_ )
# sort with class names
return sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : x.__name__ )
def __UpperCAmelCase ( lowerCamelCase_ : Any ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = get_test_classes(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : x.__name__ )
def __UpperCAmelCase ( lowerCamelCase_ : Dict ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = test_class()
if hasattr(lowerCamelCase_ , 'setUp' ):
test.setUp()
SCREAMING_SNAKE_CASE_ : str = None
if hasattr(lowerCamelCase_ , 'model_tester' ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
SCREAMING_SNAKE_CASE_ : List[Any] = test.model_tester.__class__
return model_tester
def __UpperCAmelCase ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = get_test_classes(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(lowerCamelCase_ )
# sort with class names
return sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : x.__name__ )
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : Tuple ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = get_test_classes_for_model(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
for test_class in test_classes:
SCREAMING_SNAKE_CASE_ : List[str] = get_model_tester_from_test_class(lowerCamelCase_ )
if tester_class is not None:
tester_classes.append(lowerCamelCase_ )
# sort with class names
return sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : x.__name__ )
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = get_test_classes(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Any = {test_class: get_model_tester_from_test_class(lowerCamelCase_ ) for test_class in test_classes}
return test_tester_mapping
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = get_model_classes(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Dict = {
model_class: get_test_classes_for_model(lowerCamelCase_ , lowerCamelCase_ ) for model_class in model_classes
}
return model_test_mapping
def __UpperCAmelCase ( lowerCamelCase_ : Any ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = get_model_classes(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
model_class: get_tester_classes_for_model(lowerCamelCase_ , lowerCamelCase_ ) for model_class in model_classes
}
return model_to_tester_mapping
def __UpperCAmelCase ( lowerCamelCase_ : Dict ) -> str:
"""simple docstring"""
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
return o
elif isinstance(lowerCamelCase_ , lowerCamelCase_ ):
return o.__name__
elif isinstance(lowerCamelCase_ , (list, tuple) ):
return [to_json(lowerCamelCase_ ) for x in o]
elif isinstance(lowerCamelCase_ , lowerCamelCase_ ):
return {to_json(lowerCamelCase_ ): to_json(lowerCamelCase_ ) for k, v in o.items()}
else:
return o
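

if __name__ == "__main__":
    # Minimal usage sketch: assumes this is run from the root of a transformers
    # checkout; the BERT test file below is only an illustrative example.
    bert_test_file = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
    for model_class, tester_classes in get_model_to_tester_mapping(bert_test_file).items():
        print(to_json(model_class), "->", to_json(tester_classes))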
| 105
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = '''https://openaipublic.azureedge.net/jukebox/models/'''
MODEL_MAPPING = {
'''jukebox-1b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''1b_lyrics/prior_level_2.pth.tar''',
],
'''jukebox-5b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''5b_lyrics/prior_level_2.pth.tar''',
],
}
def replace_key(key):
    """Map an original OpenAI Jukebox parameter name to its transformers equivalent."""
    if key.endswith('.model.1.bias') and len(key.split('.')) > 10:
        key = key.replace('.model.1.bias', '.conv1d_1.bias')
    elif key.endswith('.model.1.weight') and len(key.split('.')) > 10:
        key = key.replace('.model.1.weight', '.conv1d_1.weight')
    elif key.endswith('.model.3.bias') and len(key.split('.')) > 10:
        key = key.replace('.model.3.bias', '.conv1d_2.bias')
    elif key.endswith('.model.3.weight') and len(key.split('.')) > 10:
        key = key.replace('.model.3.weight', '.conv1d_2.weight')

    if "conditioner_blocks.0." in key:
        key = key.replace('conditioner_blocks.0', 'conditioner_blocks')

    if "prime_prior" in key:
        key = key.replace('prime_prior', 'encoder')

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace('.emb.', '.')

    if key.endswith('k'):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace('.k', '.codebook')
    if "y_emb." in key:
        return key.replace('y_emb.', 'metadata_embedding.')
    if "x_emb.emb." in key:
        key = key.replace('0.x_emb.emb', 'embed_tokens')
    if "prime_state_ln" in key:
        return key.replace('prime_state_ln', 'encoder.final_layer_norm')
    if ".ln" in key:
        return key.replace('.ln', '.layer_norm')
    if "_ln" in key:
        return key.replace('_ln', '_layer_norm')
    if "prime_state_proj" in key:
        return key.replace('prime_state_proj', 'encoder.proj_in')
    if "prime_x_out" in key:
        return key.replace('prime_x_out', 'encoder.lm_head')
    if "prior.x_out" in key:
        return key.replace('x_out', 'fc_proj_out')
    if "x_emb" in key:
        return key.replace('x_emb', 'embed_tokens')
    return key
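

# Illustrative renames performed by `replace_key` (the keys are example paths,
# not an exhaustive list of checkpoint entries):
#   'vqvae.bottleneck.level_blocks.0.k' -> 'vqvae.bottleneck.level_blocks.0.codebook'
#   'prior.x_out.weight'                -> 'prior.fc_proj_out.weight'
#   'y_emb.lut.weight'                  -> 'metadata_embedding.lut.weight'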
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    """Rename the keys of an original Jukebox state dict to the transformers naming scheme."""
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)')
    re_encoder_block_resnet = re.compile(
        R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)')
    re_encoder_block_proj_out = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)')
    re_decoder_block_conv_out = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)')
    re_decoder_block_resnet = re.compile(
        R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)')
    re_decoder_block_proj_in = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)')
    re_prior_cond_conv_out = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)')
    re_prior_cond_resnet = re.compile(
        R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)')
    re_prior_cond_proj_in = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)')

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = F'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)
        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = F'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
            resnet_block = F'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)
        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = F'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = F'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)
        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = F'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
            resnet_block = F'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)
        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = F'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = F'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)
        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = F'conditioner_blocks.upsampler.upsample_block.{block_index}.'
            resnet_block = F'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)
        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = F'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)
        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if F'{key_prefix}.{key}' not in model_state_dict or key is None:
            print(F'failed converting {original_key} to {key}, does not match')
        # handle mismatched shape
        elif value.shape != model_state_dict[F'{key_prefix}.{key}'].shape:
            val = model_state_dict[F'{key_prefix}.{key}']
            print(F'{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match')
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """Download the original checkpoints if needed, rename their keys and save a transformers model."""
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(F'{pytorch_dump_folder_path}/{file.split("/" )[-1]}'):
            r = requests.get(F'{PREFIX}{file}', allow_redirects=True)
            os.makedirs(F'{pytorch_dump_folder_path}/', exist_ok=True)
            open(F'{pytorch_dump_folder_path}/{file.split("/" )[-1]}', 'wb').write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split('/')[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(F'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}')['model']

        new_dic = {}
        for k in old_dic.keys():
            # expand the abbreviated parameter names stored in the original checkpoints
            if k.endswith('.b'):
                new_dic[k.replace('b', 'bias')] = old_dic[k]
            elif k.endswith('.w'):
                new_dic[k.replace('w', 'weight')] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace('.blocks.', '.model.')] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = 'vqvae' if i == 0 else F'priors.{3 - i}'
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(F'{pytorch_dump_folder_path}/mapping.json', 'w') as txtfile:
        json.dump(mapping, txtfile)

    print(F'Saving model {model_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''jukebox-5b-lyrics''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''jukebox-5b-lyrics-converted''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 105
| 1
|
'''simple docstring'''
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Calculate the great-circle distance (in meters) between two points on Earth,
    given latitudes and longitudes in decimal degrees; latitudes are reduced
    for the WGS84 flattening before applying the haversine formula.
    """
    # CONSTANTS per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_2) * cos(phi_1) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
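    # Illustrative call: the distance between San Francisco (37.774856, -122.424227)
    # and New York City (40.713019, -74.012647) comes out around 4.1e6 meters.
    print(f"{haversine_distance(37.774856, -122.424227, 40.713019, -74.012647):,.0f} meters")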
| 61
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_timm_backbone''': ['''TimmBackboneConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_timm_backbone'''] = ['''TimmBackbone''']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
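    # Note: with this lazy-module pattern, `from ...timm_backbone import TimmBackbone`
    # only materializes the heavy torch-backed module on first attribute access,
    # while the TYPE_CHECKING branch above keeps static type checkers working.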
| 61
| 1
|
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    """Scale a pixel-space bounding box to the fixed 0-1000 grid used by LayoutLM-style models."""
return [
int(1_0_0_0 * (box[0] / width) ),
int(1_0_0_0 * (box[1] / height) ),
int(1_0_0_0 * (box[2] / width) ),
int(1_0_0_0 * (box[3] / height) ),
]
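

# Worked example: a 100x200-pixel box on a 1000x2000 image maps onto the
# 0-1000 grid as normalize_box([10, 20, 110, 220], 1000, 2000) == [10, 10, 110, 110]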
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ''''''
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type='''dict''', config=tesseract_config)
    words, left, top, width, height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    """
    Constructs an image processor for document images: optionally resizes them and
    runs Tesseract OCR to extract words and normalized bounding boxes.
    """

    model_input_names = ['pixel_values']

    def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, apply_ocr: bool = True, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = "", **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'''height''': 224, '''width''': 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        """Resize an image to (size["height"], size["width"])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}''' )
        output_size = (size['''height'''], size['''width'''])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess( self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, apply_ocr: bool = None, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image:
        """Preprocess an image or batch of images."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if apply_ocr:
            requires_backends(self, '''pytesseract''' )
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={'''pixel_values''': images}, tensor_type=return_tensors)
        if apply_ocr:
            data['''words'''] = words_batch
            data['''boxes'''] = boxes_batch
        return data
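

# --- Usage sketch (illustrative; OCR additionally requires pytesseract) ----
#
#     from PIL import Image
#     processor = LayoutLMv2ImageProcessor(apply_ocr=False)
#     encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="np")
#     print(encoding["pixel_values"].shape)  # (1, 3, 224, 224)
#
# "document.png" is a placeholder path, not a file shipped with this module.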
| 161
|
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_lowerCAmelCase = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_lowerCAmelCase = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
_lowerCAmelCase = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    """TER (Translation Edit Rate) metric, computed via sacrebleu."""

    def _info(self):
        if version.parse(scb.__version__) < version.parse('''1.4.12'''):
            raise ImportWarning(
                '''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
                '''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, homepage='''http://www.cs.umd.edu/~snover/tercom/''', inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''', id='''sequence'''),
                    '''references''': datasets.Sequence(datasets.Value('''string''', id='''sequence'''), id='''references'''),
                } ), codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''], reference_urls=[
                '''https://github.com/jhclark/tercom''',
            ], )

    def _compute(self, predictions, references, normalized: bool = False, ignore_punct: bool = False, support_zh_ja_chars: bool = False, case_sensitive: bool = False, ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized, no_punct=ignore_punct, asian_support=support_zh_ja_chars, case_sensitive=case_sensitive, )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 161
| 1
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}')
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    F'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'')
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield F'{file_idx}_{batch_idx}', self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(F'Failed to read file \'{file}\' with error {type(e)}: {e}')
                    raise
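

# --- Usage sketch -----------------------------------------------------------
# This builder is what backs `load_dataset("parquet", ...)`; a typical call
# looks like the following (the file path is illustrative):
#
#     from datasets import load_dataset
#     ds = load_dataset("parquet", data_files={"train": "data/train.parquet"})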
| 704
|
"""simple docstring"""
from __future__ import annotations
import math
__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"
def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """Convert a 3D point to its 2D perspective projection."""
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f'Input values must either be float or int: {list(locals().values() )}'
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotate a 3D point around the given axis ('x', 'y' or 'z') by `angle`."""
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f'{list(input_variables.values() )}'
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(F"""{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }""")
| 482
| 0
|
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system matrix * solution = vector via Gaussian elimination with partial pivoting."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_points: list[int]) -> Callable[[int], int]:
    """Return the polynomial of minimal degree that passes through (1, y0), (2, y1), ..."""
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size))

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating function u(n) = 1 - n + n^2 - n^3 + ... + n^10 from Project Euler 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms of the best-fit polynomials for the first `order` terms of `func`."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
| 25
|
TEXT_TO_IMAGE_PARAMS = frozenset(
[
'prompt',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(['prompt', 'negative_prompt'])
TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])
IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(['image'])
IMAGE_VARIATION_PARAMS = frozenset(
[
'image',
'height',
'width',
'guidance_scale',
]
)
IMAGE_VARIATION_BATCH_PARAMS = frozenset(['image'])
TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
[
'prompt',
'image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(['prompt', 'image', 'negative_prompt'])
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
[
# Text guided image variation with an image mask
'prompt',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt'])
IMAGE_INPAINTING_PARAMS = frozenset(
[
# image variation with an image mask
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['image', 'mask_image'])
IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
[
'example_image',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['example_image', 'image', 'mask_image'])
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(['class_labels'])
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(['class_labels'])
UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(['batch_size'])
UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])
UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(['batch_size'])
UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])
TEXT_TO_AUDIO_PARAMS = frozenset(
[
'prompt',
'audio_length_in_s',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(['prompt', 'negative_prompt'])
TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(['input_tokens'])
TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(['input_tokens'])
| 25
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''prompt''': '''An astronaut riding an elephant''',
            '''source_prompt''': '''An astronaut riding a horse''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''eta''': 0.1,
            '''strength''': 0.8,
            '''guidance_scale''': 3,
            '''source_guidance_scale''': 1,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_stable_diffusion_cycle(self):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def __lowerCAmelCase ( self ) -> List[str]:
_a = self.get_dummy_components()
for name, module in components.items():
if hasattr(__snake_case , "half" ):
_a = module.half()
_a = CycleDiffusionPipeline(**__snake_case )
_a = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
_a = self.get_dummy_inputs(__snake_case )
_a = pipe(**__snake_case )
_a = output.images
_a = images[0, -3:, -3:, -1]
assert images.shape == (1, 3_2, 3_2, 3)
_a = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy" )
        init_image = init_image.resize((512, 512))

        model_id = '''CompVis/stable-diffusion-v1-4'''
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16")

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = '''A black colored car'''
        prompt = '''A blue colored car'''

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np", )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1
    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy" )
        init_image = init_image.resize((512, 512))

        model_id = '''CompVis/stable-diffusion-v1-4'''
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = '''A black colored car'''
        prompt = '''A blue colored car'''

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np", )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
| 705
|
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1


@dataclass
class A:
    x: int
    y: str


class PyUtilsTest(TestCase):
    def test_map_nested(self):
        sn1 = {}
        sn2 = []
        sn3 = 1
        sn4 = [1, 2]
        sn5 = {"a": 1, "b": 2}
        sn6 = {"a": [1, 2], "b": [3, 4]}
        sn7 = {"a": {"1": 1}, "b": 2}
        sn8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_sn1 = {}
        expected_map_nested_sn2 = []
        expected_map_nested_sn3 = 2
        expected_map_nested_sn4 = [2, 3]
        expected_map_nested_sn5 = {"a": 2, "b": 3}
        expected_map_nested_sn6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_sn7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_sn8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, sn1), expected_map_nested_sn1)
        self.assertEqual(map_nested(add_one, sn2), expected_map_nested_sn2)
        self.assertEqual(map_nested(add_one, sn3), expected_map_nested_sn3)
        self.assertEqual(map_nested(add_one, sn4), expected_map_nested_sn4)
        self.assertEqual(map_nested(add_one, sn5), expected_map_nested_sn5)
        self.assertEqual(map_nested(add_one, sn6), expected_map_nested_sn6)
        self.assertEqual(map_nested(add_one, sn7), expected_map_nested_sn7)
        self.assertEqual(map_nested(add_one, sn8), expected_map_nested_sn8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, sn1, num_proc=num_proc), expected_map_nested_sn1)
        self.assertEqual(map_nested(add_one, sn2, num_proc=num_proc), expected_map_nested_sn2)
        self.assertEqual(map_nested(add_one, sn3, num_proc=num_proc), expected_map_nested_sn3)
        self.assertEqual(map_nested(add_one, sn4, num_proc=num_proc), expected_map_nested_sn4)
        self.assertEqual(map_nested(add_one, sn5, num_proc=num_proc), expected_map_nested_sn5)
        self.assertEqual(map_nested(add_one, sn6, num_proc=num_proc), expected_map_nested_sn6)
        self.assertEqual(map_nested(add_one, sn7, num_proc=num_proc), expected_map_nested_sn7)
        self.assertEqual(map_nested(add_one, sn8, num_proc=num_proc), expected_map_nested_sn8)

        sn = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(np_sum, sn, map_numpy=True).items()}, {k: v.tolist() for k, v in expected_map_nested_sn_int.items()}, )
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False, num_proc=num_proc), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(np_sum, sn, map_numpy=True, num_proc=num_proc).items()}, {k: v.tolist() for k, v in expected_map_nested_sn_int.items()}, )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn1, num_proc=num_proc)
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)

    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc", [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
], )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool") as mock_multiprocessing_pool:
        data_struct = {F'''{i}''': i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
    @require_torch
    def test_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
    def test_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data", [{}] )
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output", [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
], )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    nested_input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(nested_input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text ( text: str ):
    return text.split()
def _aseconds_generator_of_aitems_with_timing ( content ):
    yield (time.time(), content)
    time.sleep(2 )
    yield (time.time(), content)
def test_iflatmap_unordered ( ):
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(out ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(out ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content )
        assert out.count("a" ) == 2
        assert out.count("b" ) == 2
        assert len(out ) == 4
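# Minimal sketch mirroring the test above: iflatmap_unordered fans the kwargs
# out to pool workers and yields each produced item as soon as it is ready, in
# no guaranteed order. _shout and _demo_iflatmap_unordered are hypothetical
# helpers written only for this demo; call the demo under a __main__ guard.
def _shout(text):
    yield text.upper()

def _demo_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _shout, kwargs_iterable=[{"text": "hi"}] * 4))
    assert sorted(out) == ["HI"] * 4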
| 691
| 0
|
def and_gate ( input_1: int , input_2: int ) -> int:
    '''simple docstring'''
    return int((input_1, input_2).count(0 ) == 0 )
def test_and_gate ( ) -> None:
    '''simple docstring'''
    assert and_gate(0 , 0 ) == 0
    assert and_gate(0 , 1 ) == 0
    assert and_gate(1 , 0 ) == 0
    assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
    test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
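# Companion sketch (not from the original module): the same tuple-counting
# trick implements an OR gate -- the output is 1 unless both inputs are 0.
def or_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(1) != 0)

assert or_gate(0, 0) == 0 and or_gate(1, 0) == 1 and or_gate(1, 1) == 1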
| 167
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list ( shape , scale=1.0 , rng=None , name=None ) -> Union[str, Any]:
    '''simple docstring'''
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class TvltFeatureExtractionTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=4_0_0 , max_seq_length=2_0_0_0 , spectrogram_length=2_0_4_8 , feature_size=1_2_8 , num_audio_channels=1 , hop_length=5_1_2 , chunk_length=3_0 , sampling_rate=4_4_1_0_0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict( self):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest ( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = TvltFeatureExtractor
    def setUp( self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)
    def test_feat_extract_properties( self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor , '''spectrogram_length'''))
        self.assertTrue(hasattr(feature_extractor , '''feature_size'''))
        self.assertTrue(hasattr(feature_extractor , '''num_audio_channels'''))
        self.assertTrue(hasattr(feature_extractor , '''hop_length'''))
        self.assertTrue(hasattr(feature_extractor , '''chunk_length'''))
        self.assertTrue(hasattr(feature_extractor , '''sampling_rate'''))
    def test_feat_extract_from_and_save_pretrained( self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop('''mel_filters''')
        mel_2 = dict_second.pop('''mel_filters''')
        self.assertTrue(np.allclose(mel_1 , mel_2))
        self.assertEqual(dict_first , dict_second)
    def test_feat_extract_to_json_file( self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , '''feat_extract.json''')
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop('''mel_filters''')
        mel_2 = dict_second.pop('''mel_filters''')
        self.assertTrue(np.allclose(mel_1 , mel_2))
        self.assertEqual(dict_first , dict_second)
    def test_call( self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4_1_0_0).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors='''np''' , sampling_rate=4_4_1_0_0).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs , return_tensors='''np''' , sampling_rate=4_4_1_0_0 , mask_audio=True).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (8_0_0, 8_0_0, 8_0_0)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs , return_tensors='''np''' , sampling_rate=4_4_1_0_0).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
    def _load_datasamples( self , num_samples):
        ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''')
        # automatic decoding with librispeech
        speech_samples = ds.sort('''id''').select(range(num_samples))[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
    def test_integration( self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech , return_tensors='''pt''').audio_values
        self.assertEqual(audio_values.shape , (1, 1, 1_9_2, 1_2_8))
        expected_slice = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , expected_slice , atol=1e-4))
| 462
| 0
|
def solution ( max_base: int = 10 , max_power: int = 22 ) -> int:
    bases = range(1 , max_base )
    powers = range(1 , max_power )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )
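# Worked check for the search above: 9**4 == 6561 has exactly 4 digits, so it
# counts, while 10**2 == 100 already has 3 digits -- no base of 10 or more can
# have as few digits as its exponent, which is why bases stop at 9.
assert len(str(9**4)) == 4 and len(str(10**2)) == 3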
if __name__ == "__main__":
print(F"""{solution(1_0, 2_2) = }""")
| 706
|
from __future__ import annotations
from collections.abc import Generator
def sieve ( ) -> Generator[int, None, None]:
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime , None )
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution ( limit: float = 1E10 ) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes )
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes )
        n += 2
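# Usage sketch of the incremental sieve above: it yields primes indefinitely,
# so itertools.islice is a convenient way to take a finite prefix.
from itertools import islice

assert list(islice(sieve(), 5)) == [2, 3, 5, 7, 11]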
if __name__ == "__main__":
print(solution())
| 535
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28
|
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get ( k ):
    """simple docstring"""
    return getitem, k
def _set ( k , v ):
    """simple docstring"""
    return setitem, k, v
def _del ( k ):
    """simple docstring"""
    return delitem, k
def _run_operation ( obj , fun , *args ):
    """simple docstring"""
    try:
        return fun(obj , *args ), None
    except Exception as e:
        return None, e
_add_items = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
_overwrite_items = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
_delete_items = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
_access_absent_items = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
'''operations''' , (
pytest.param(_add_items , id='''add items''' ),
pytest.param(_overwrite_items , id='''overwrite items''' ),
pytest.param(_delete_items , id='''delete items''' ),
pytest.param(_access_absent_items , id='''access absent items''' ),
pytest.param(_add_with_resize_up , id='''add with resize up''' ),
pytest.param(_add_with_resize_down , id='''add with resize down''' ),
) , )
def test_hash_map_is_the_same_as_dict ( operations ):
    """simple docstring"""
    my = HashMap(initial_block_size=4 )
    py = {}
    for _, (fun, *args) in enumerate(operations ):
        my_res , my_exc = _run_operation(my , fun , *args )
        py_res , py_exc = _run_operation(py , fun , *args )
        assert my_res == py_res
        assert str(my ) == str(py )
        assert set(my ) == set(py )
        assert len(my ) == len(py )
        assert set(my.items() ) == set(py.items() )
def test_no_new_methods ( ):
    """simple docstring"""
    def is_public (name ) -> bool:
        return not name.startswith('''_''' )
    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}
assert dict_public_names > hash_public_names
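# Standalone sketch of the differential-testing idea used above: replay one
# operation list against both HashMap and the built-in dict and compare
# results. _demo_differential_test is a hypothetical helper for this demo only.
def _demo_differential_test():
    my, py = HashMap(initial_block_size=4), {}
    for fun, *args in [_set("k", "v"), _get("k"), _del("k")]:
        my_res, _ = _run_operation(my, fun, *args)
        py_res, _ = _run_operation(py, fun, *args)
        assert my_res == py_res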
| 252
| 0
|
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
_A = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _compare_versions (op ,got_ver ,want_ver ,requirement ,pkg ,hint ) -> None:
    '''simple docstring'''
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
            f""" reinstalling {pkg}.""" )
    if not ops[op](version.parse(got_ver ) ,version.parse(want_ver ) ):
        raise ImportError(
            f"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" )
def require_version (requirement ,hint = None ) -> None:
    '''simple docstring'''
    hint = f"""\n{hint}""" if hint is not None else ""
    # non-versioned check
    if re.match(r"^[\w_\-\d]+$" ,requirement ):
        pkg , op , want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)" ,requirement )
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f""" got {requirement}""" )
        pkg , want_full = match[0]
        want_range = want_full.split("," )  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)" ,w )
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f""" but got {requirement}""" )
            op , want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" )
    # special case
    if pkg == "python":
        got_ver = ".".join([str(x ) for x in sys.version_info[:3]] )
        for op, want_ver in wanted.items():
            _compare_versions(op ,got_ver ,want_ver ,requirement ,pkg ,hint )
        return
    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg )
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"""The '{requirement}' distribution was not found and is required by this application. {hint}""" )
    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op ,got_ver ,want_ver ,requirement ,pkg ,hint )
def require_version_core (requirement ) -> None:
    '''simple docstring'''
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement ,hint )
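# Usage sketch: require_version accepts pip-style specifiers, optionally
# comma-separated, and raises with the hint appended when the check fails.
# (Assumes packaging>=20.0 is installed, which the `from packaging import
# version` import above already relies on.)
require_version("packaging>=20.0", "pip install packaging")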
| 701
|
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def onnx_compliancy (saved_model_path ,strict ,opset ) -> None:
    '''simple docstring'''
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH ,"utils" ,"tf_ops" ,"onnx.json" ) ) as f:
        onnx_opsets = json.load(f )["opsets"]
    for i in range(1 ,opset + 1 ):
        onnx_ops.extend(onnx_opsets[str(i )] )
    with open(saved_model_path ,"rb" ) as f:
        saved_model.ParseFromString(f.read() )
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node )
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def )
    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names )
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op )
    if strict and len(incompatible_ops ) > 0:
        raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + "\n".join(incompatible_ops ) )
    elif len(incompatible_ops ) > 0:
        print(f"""Found the following incompatible ops for the opset {opset}:""" )
        print(*incompatible_ops ,sep="\n" )
    else:
        print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
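# Example invocation (a sketch; assumes the script lives under utils/ as the
# header comment suggests and that a SavedModel .pb file is available):
#   python utils/check_tf_ops.py --saved_model_path saved_model/saved_model.pb --opset 12 --strict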
| 228
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs ( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFRoFormerModel(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        prediction_scores = model(inputs )['logits']
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
    def create_and_check_for_masked_lm ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFRoFormerForMaskedLM(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFRoFormerForQuestionAnswering(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common ( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip ( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        '''simple docstring'''
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False
    def setUp ( self ):
        '''simple docstring'''
        self.model_tester = TFRoFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RoFormerConfig , hidden_size=37 )
    def test_config ( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model ( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm ( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_causal_lm ( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs )
    def test_for_multiple_choice ( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering ( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification ( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification ( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained ( self ):
        '''simple docstring'''
        model = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
        self.assertIsNotNone(model )
@require_tf
class TFRoFormerModelIntegrationTest ( unittest.TestCase ):
    """simple docstring"""
    @slow
    def test_inference_masked_lm ( self ):
        '''simple docstring'''
        model = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        # TODO Replace vocab size
        vocab_size = 5_0000
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape , expected_shape )
        print(output[:, :3, :3] )
        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.1205_3341, -1.026_4901, 0.2922_1946],
                    [-1.513_3783, 0.19_7433, 0.1519_0607],
                    [-5.013_5403, -3.90_0256, -0.8403_8764],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest ( unittest.TestCase ):
    """simple docstring"""
    tolerance = 1E-4
    def test_basic ( self ):
        '''simple docstring'''
        input_ids = tf.constant([[4, 10]] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
        emba_weights = emba(input_ids.shape )
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
        tf.debugging.assert_near(emba_weights , desired_weights , atol=self.tolerance )
    def test_positional_emb_weights_against_roformer ( self ):
        '''simple docstring'''
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
        emba([2, 16, 512] )
        weights = emba.weight[:3, :5]
        tf.debugging.assert_near(weights , desired_weights , atol=self.tolerance )
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest ( unittest.TestCase ):
    """simple docstring"""
    tolerance = 1E-4
    def test_apply_rotary_position_embeddings ( self ):
        '''simple docstring'''
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
        sinusoidal_positions = embed_positions([2, 16, 768] )[None, None, :, :]
        query_layer , key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_positions , query_layer , key_layer )
        expected_query = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
        expected_key = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , expected_query , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , expected_key , atol=self.tolerance )
| 444
|
"""simple docstring"""
import math
import unittest
def is_prime ( number: int ) -> bool:
    """simple docstring"""
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
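# Quick sanity sketch (not part of the original module) for the 6k +/- 1 trial
# division above:
assert [n for n in range(20) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19]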
class a_ ( unittest.TestCase ):
    def test_primes ( self ) -> None:
'''simple docstring'''
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
    def test_not_primes ( self ) -> None:
        '''simple docstring'''
        with self.assertRaises(AssertionError ):
            is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 555
| 0
|
'''simple docstring'''
cache: dict[tuple[int, int, int], int] = {}
def _calculate ( days , absent , late ):
    '''simple docstring'''
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1 , absent , late + 1 )
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1 , absent + 1 , 0 )
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1 , absent , 0 )
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings
def solution ( days = 30 ):
    '''simple docstring'''
    return _calculate(days , absent=0 , late=0 )
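# Small worked case for the recursion above (a sketch, not in the original
# module): over 4 days there are 3**4 = 81 raw attendance strings, of which 43
# satisfy both rules.
assert solution(4) == 43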
if __name__ == "__main__":
print(solution())
| 609
|
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp ( self ):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id ( self ):
        '''simple docstring'''
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab ( self ):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "<eod>" )
        self.assertEqual(len(vocab_keys ) , 1_0_0_6 )
    def test_vocab_size ( self ):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
    def test_full_tokenizer ( self ):
        '''simple docstring'''
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(ids , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    def test_tokenizer_lower ( self ):
        '''simple docstring'''
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB , do_lower_case=True )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "",
"i",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["▁he", "ll", "o"] )
    def test_tokenizer_no_lower ( self ):
        '''simple docstring'''
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB , do_lower_case=False )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
@slow
    def test_sequence_builders ( self ):
        '''simple docstring'''
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased" )
        text = tokenizer.encode("sequence builders" , add_special_tokens=False )
        text_a = tokenizer.encode("multi-sequence build" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
    def test_tokenizer_integration ( self ):
        '''simple docstring'''
        # fmt: off
UpperCAmelCase : Dict = {"input_ids": [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase , model_name="xlnet-base-cased" , revision="c841166438c31ec7ca9a106dee7bb312b73ae511" , )
| 609
| 1
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_xlm_roberta_base ( self ):
        '''simple docstring'''
        model = XLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
        input_ids = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 1_2, 7_6_8) )  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids )["""last_hidden_state"""].detach()
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) )
@slow
    def test_xlm_roberta_large ( self ):
        '''simple docstring'''
        model = XLMRobertaModel.from_pretrained("""xlm-roberta-large""" )
        input_ids = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 1_2, 1_0_2_4) )  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids )["""last_hidden_state"""].detach()
        self.assertEqual(output.shape , expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) )
| 582
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = """biogpt"""
    def __init__( self , vocab_size=4_2_3_8_4 , hidden_size=1_0_2_4 , num_hidden_layers=2_4 , num_attention_heads=1_6 , intermediate_size=4_0_9_6 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_0_2_4 , initializer_range=0.0_2 , layer_norm_eps=1E-12 , scale_embedding=True , use_cache=True , layerdrop=0.0 , activation_dropout=0.0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
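# Usage sketch: with the defaults above, a bare instantiation mirrors the
# microsoft/biogpt architecture (class name as repaired above).
config = BioGptConfig()
assert config.vocab_size == 4_2_3_8_4 and config.num_hidden_layers == 2_4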
| 582
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Optional[Any] = logging.get_logger(__name__)
lowercase__ : List[Any] = {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json''',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig ( PretrainedConfig ):
    model_type = """realm"""
    def __init__( self , vocab_size=30522 , hidden_size=768 , retriever_proj_size=128 , num_hidden_layers=12 , num_attention_heads=12 , num_candidates=8 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , span_hidden_size=256 , max_span_width=10 , reader_layer_norm_eps=1E-3 , reader_beam_size=5 , reader_seq_len=320 , num_block_records=13353718 , searcher_beam_size=5000 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 485
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowercase__ : Optional[Any] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."})
    image_column_name: Optional[str] = field(
        default=None , metadata={"help": "The column name of the images in the files."})
    train_dir: Optional[str] = field(default=None , metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None , metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15 , metadata={"help": "Percent to split off of train for validation."})
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )
    def __post_init__( self ):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None , metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        } , )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"})
    config_overrides: Optional[str] = field(
        default=None , metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        } , )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"})
    model_revision: str = field(
        default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    image_processor_name: str = field(default=None , metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False , metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
    mask_ratio: float = field(
        default=0.75 , metadata={"help": "The ratio of the number of masked tokens in the input sequence."})
    norm_pix_loss: bool = field(
        default=True , metadata={"help": "Whether or not to train with normalized pixel values as target."})
@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def collate_fn(examples):
    # Stack the per-example image tensors into a single (batch, C, H, W) tensor.
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )
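
    # Worked example (illustrative): for a 224x224 input with 16x16 patches the
    # encoder sees 14 * 14 = 196 patches; with the default mask_ratio of 0.75,
    # roughly 0.75 * 196 = 147 patches are masked and only 49 are encoded.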
    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()
    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )
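
    # Illustrative: applying `transforms` to a single PIL image yields a float
    # tensor of shape (3, size, size), e.g. (3, 224, 224) with the default ViT
    # image processor, normalized with the processor's mean/std.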
    def preprocess_images(examples):
        # Apply the torchvision pipeline to every image in the batch.
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
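
    # Worked example (illustrative numbers): with the default base_learning_rate
    # of 1e-3, a per-device batch size of 64, gradient accumulation of 2 and 2
    # processes, total_train_batch_size = 64 * 2 * 2 = 256, so the absolute
    # learning_rate = 1e-3 * 256 / 256 = 1e-3.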
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
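
# An illustrative launch command (hedged: paths, dataset and hyperparameters are
# placeholders; each flag corresponds to a dataclass field defined above):
#
#     python run_mae.py \
#         --dataset_name cifar10 \
#         --output_dir ./vit-mae-demo \
#         --do_train --do_eval \
#         --base_learning_rate 1.5e-4 \
#         --mask_ratio 0.75 \
#         --norm_pix_loss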
| 485
| 1
|