import argparse

import torch
from torch import nn

from transformers import M2M100Config, M2M100ForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
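# Quick follow-up sketch (not part of the original script; the paths below are
# hypothetical): a converted dump can be re-loaded through the regular
# `from_pretrained` API as a sanity check.
#
#   from transformers import M2M100ForConditionalGeneration
#   model = M2M100ForConditionalGeneration.from_pretrained("./m2m100-hf")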
from pathlib import Path

import numpy as np
from PIL import Image


def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Return a grayscale image computed from an RGB image."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Return a binary image computed from a grayscale image."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Return the morphological dilation of a binary image by the given kernel."""
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
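    # Tiny numeric check (illustrative addition): a single foreground pixel
    # dilated by the cross-shaped structuring element grows into exactly its
    # 4-neighbourhood, i.e. the cross itself.
    tiny = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
    assert (dilation(tiny, structuring_element) == structuring_element).all()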
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below n."""
    a = 3
    result = 0
    while a < n:
        # Multiples of 15 need no special case: they are multiples of 3 and
        # are already counted exactly once by the single check below.
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
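    # Sanity check (illustrative addition): below 10 the multiples of 3 or 5
    # are 3, 5, 6 and 9, which sum to 23.
    assert solution(10) == 23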
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, file_path: str):
        r"""Find the first non-binary `open(...)` call that omits an explicit encoding."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, file_path: str):
        r"""Find the first `print(...)` call that is not inside a comment or docstring."""
        with open(file_path, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
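# Illustrative note on the encoding regexp above (a sketch, not part of the
# original tests): the negative lookahead skips calls that already pass an
# encoding or open the file in a binary/write mode, so:
#
#   with open(path) as f:                    # flagged
#   with open(path, encoding="utf-8") as f:  # not flagged
#   with open(path, "rb") as f:              # not flagged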
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """Print every subsequence of `sequence` via depth-first search.

    At each index the tree branches twice: once excluding the current element
    and once including it.
    """
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
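    # Small worked example (illustrative addition): a two-element sequence
    # yields all four subsequences, printed as [], [2], [1], [1, 2].
    seq.clear()
    seq.extend([1, 2])
    generate_all_subsequences(seq)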
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
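# Illustrative note (a sketch, not part of the original file): with the lazy
# registration above, an import such as
#
#   from transformers.models.blip_2 import Blip2Config
#
# defers loading `configuration_blip_2` until the attribute is first accessed.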
"""Convert an OpenAI GPT-2 TensorFlow checkpoint to a PyTorch model."""
import argparse

import torch

from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
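# Example invocation (illustrative; the script filename and paths are
# assumptions, not taken from this file):
#
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path ./gpt2/model.ckpt \
#       --pytorch_dump_folder_path ./gpt2-pytorch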
"""Tokenization classes for CPM-Ant."""
import collections
import os
from typing import List, Optional, Tuple

from transformers.utils import is_jieba_available, requires_backends


if is_jieba_available():
    import jieba

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}


def load_vocab(vocab_file):
    """Load a vocabulary file into an ordered token -> index dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Split `token` into the longest vocabulary pieces, left to right."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens


class CpmAntTokenizer(PreTrainedTokenizer):
    """Construct a CPM-Ant tokenizer (jieba pre-segmentation + WordPiece)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string: segment with jieba, then apply WordPiece."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping padding and special ids."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
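if __name__ == "__main__":
    # Minimal sketch (illustrative addition) of the greedy longest-match-first
    # loop in `WordpieceTokenizer.tokenize`, using a toy vocabulary.
    _toy = WordpieceTokenizer(vocab={"un": 0, "happy": 1}, unk_token="<unk>")
    assert _toy.tokenize("unhappy") == ["un", "happy"]
    assert _toy.tokenize("unhappyx") == ["un", "happy", "<unk>"]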
"""simple docstring"""
import math
import os
import sys
def snake_case ( A__ ):
UpperCAmelCase_ : Tuple = ""
try:
with open(A__ ,"rb" ) as binary_file:
UpperCAmelCase_ : Tuple = binary_file.read()
for dat in data:
UpperCAmelCase_ : str = F"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def snake_case ( A__ ,A__ ,A__ ,A__ ):
lexicon.pop(A__ )
UpperCAmelCase_ : List[str] = last_match_id
if math.loga(A__ ).is_integer():
for curr_key in lexicon:
UpperCAmelCase_ : Optional[Any] = "0" + lexicon[curr_key]
UpperCAmelCase_ : Dict = bin(A__ )[2:]
def snake_case ( A__ ):
UpperCAmelCase_ : Tuple = {"0": "0", "1": "1"}
UpperCAmelCase_ , UpperCAmelCase_ : Any = "", ""
UpperCAmelCase_ : Dict = len(A__ )
for i in range(len(A__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
UpperCAmelCase_ : Optional[int] = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(A__ ,A__ ,A__ ,A__ )
index += 1
UpperCAmelCase_ : List[str] = ""
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
UpperCAmelCase_ : Union[str, Any] = lexicon[curr_string]
result += last_match_id
return result
def snake_case ( A__ ,A__ ):
UpperCAmelCase_ : Dict = os.path.getsize(A__ )
UpperCAmelCase_ : int = bin(A__ )[2:]
UpperCAmelCase_ : Optional[int] = len(A__ )
return "0" * (length_length - 1) + file_length_binary + compressed
def snake_case ( A__ ,A__ ):
UpperCAmelCase_ : Optional[int] = 8
try:
with open(A__ ,"wb" ) as opened_file:
UpperCAmelCase_ : Optional[Any] = [
to_write[i : i + byte_length]
for i in range(0 ,len(A__ ) ,A__ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(A__ ,2 ).to_bytes(1 ,byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def snake_case ( A__ ,A__ ):
UpperCAmelCase_ : List[Any] = read_file_binary(A__ )
UpperCAmelCase_ : List[str] = compress_data(A__ )
UpperCAmelCase_ : Tuple = add_file_length(A__ ,A__ )
write_file_binary(A__ ,A__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
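    # Sanity check (illustrative addition): on the input bits "01" the lexicon
    # starts as {"0": "0", "1": "1"}; "0" emits code "0" and triggers a code
    # widening, after which "1" emits "01", giving "001" overall.
    assert compress_data("01") == "001"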
import unittest

import numpy as np

from transformers import is_flax_available
from transformers.testing_utils import require_flax

from ..test_modeling_flax_common import ids_tensor


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.generation import (
        FlaxForcedBOSTokenLogitsProcessor,
        FlaxForcedEOSTokenLogitsProcessor,
        FlaxLogitsProcessorList,
        FlaxMinLengthLogitsProcessor,
        FlaxTemperatureLogitsWarper,
        FlaxTopKLogitsWarper,
        FlaxTopPLogitsWarper,
    )


@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size: int, length: int):
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())

    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])

    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        expected_filtered_dist = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, expected_filtered_dist, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])

    def test_min_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())

    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
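# Illustrative note (a sketch, not part of the original tests): every warper
# and processor above shares the call signature
# `processor(input_ids, scores, cur_len=...)` and returns the modified scores,
# e.g. with flax installed:
#
#   warper = FlaxTopKLogitsWarper(top_k=2)
#   out = warper(None, jnp.array([[0.1, 0.4, 0.3]]), cur_len=None)
#   # all but the two largest logits are now -inf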
from PIL import Image


def change_contrast(img: Image, level: int) -> Image:
    """Change the contrast of `img` by the given level (-255 to +255)."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        """Map a single channel value, stretching it away from the midpoint 128."""
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
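        # Worked arithmetic (illustrative addition): with level = 170 the
        # factor is (259 * (170 + 255)) / (255 * (259 - 170)) = 110075 / 22695,
        # roughly 4.85, a strong contrast stretch.
        assert abs((259 * (170 + 255)) / (255 * (259 - 170)) - 4.85) < 0.01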
import unittest

from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass

    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/deberta-v2-xlarge",
            revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
snake_case = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    """Return all primes below `limit` using a sieve over odd numbers."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Return the prime below `ceiling` that is the sum of the most consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break

            if sol in primes:
                length = j - i
                largest = sol

    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
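    # Small-scale check (illustrative addition): below 100 the longest run is
    # 2 + 3 + 5 + 7 + 11 + 13 = 41, which is prime.
    assert solution(100) == 41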
"""simple docstring"""
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise ValueError('The length of profit and weight must be same.' )
if max_weight <= 0:
raise ValueError('max_weight must greater than zero.' )
if any(p < 0 for p in profit ):
raise ValueError('Profit can not be negative.' )
if any(w < 0 for w in weight ):
raise ValueError('Weight can not be negative.' )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
_lowerCAmelCase : List[Any] = [p / w for p, w in zip(lowerCAmelCase__ , lowerCAmelCase__ )]
# Creating a copy of the list and sorting profit/weight in ascending order
_lowerCAmelCase : List[str] = sorted(lowerCAmelCase__ )
# declaring useful variables
_lowerCAmelCase : List[str] = len(lowerCAmelCase__ )
_lowerCAmelCase : int = 0
_lowerCAmelCase : str = 0
_lowerCAmelCase : Optional[int] = 0
# loop till the total weight do not reach max limit e.g. 15 kg and till i<length
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
_lowerCAmelCase : Optional[Any] = sorted_profit_by_weight[length - i - 1]
_lowerCAmelCase : Union[str, Any] = profit_by_weight.index(lowerCAmelCase__ )
_lowerCAmelCase : Any = -1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
# Adding profit gained for the given weight 1 ===
# weight[index]/weight[index]
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
_lowerCAmelCase = [int(x) for x in input("""Input profits separated by spaces: """).split()]
_lowerCAmelCase = [int(x) for x in input("""Input weights separated by spaces: """).split()]
_lowerCAmelCase = int(input("""Max weight allowed: """))
# Function Call
calc_profit(profit, weight, max_weight)
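    # Worked example (illustrative addition): profits [60, 100, 120], weights
    # [10, 20, 30], max_weight 50 -> items 1 and 2 are taken whole plus two
    # thirds of item 3, for the classic optimum of 240.
    assert abs(calc_profit([60, 100, 120], [10, 20, 30], 50) - 240.0) < 1e-6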
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X20000 and cp <= 0X2A6DF) #
or (cp >= 0X2A700 and cp <= 0X2B73F) #
or (cp >= 0X2B740 and cp <= 0X2B81F) #
or (cp >= 0X2B820 and cp <= 0X2CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2F800 and cp <= 0X2FA1F) #
): #
return True
return False
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
for char in word:
_lowerCAmelCase : Dict = ord(_lowerCamelCase )
if not _is_chinese_char(_lowerCamelCase ):
return 0
return 1
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = set()
for token in tokens:
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase ) > 1 and is_chinese(_lowerCamelCase )
if chinese_word:
word_set.add(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = list(_lowerCamelCase )
return word_list
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
_lowerCAmelCase : Optional[Any] = max([len(_lowerCamelCase ) for w in chinese_word_set] )
_lowerCAmelCase : str = bert_tokens
_lowerCAmelCase, _lowerCAmelCase : Optional[Any] = 0, len(_lowerCamelCase )
while start < end:
_lowerCAmelCase : Dict = True
if is_chinese(bert_word[start] ):
_lowerCAmelCase : str = min(end - start , _lowerCamelCase )
for i in range(_lowerCamelCase , 1 , -1 ):
_lowerCAmelCase : List[Any] = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
_lowerCAmelCase : Tuple = '##' + bert_word[j]
_lowerCAmelCase : Optional[int] = start + i
_lowerCAmelCase : Any = False
break
if single_word:
start += 1
return bert_word
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = []
for i in range(0 , len(_lowerCamelCase ) , 100 ):
_lowerCAmelCase : Tuple = ltp_tokenizer.seg(lines[i : i + 100] )[0]
_lowerCAmelCase : List[Any] = [get_chinese_word(_lowerCamelCase ) for r in res]
ltp_res.extend(_lowerCamelCase )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
_lowerCAmelCase : int = []
for i in range(0 , len(_lowerCamelCase ) , 100 ):
_lowerCAmelCase : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_lowerCamelCase , truncation=_lowerCamelCase , max_length=512 )
bert_res.extend(res['input_ids'] )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = []
for input_ids, chinese_word in zip(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Optional[int] = []
for id in input_ids:
_lowerCAmelCase : List[Any] = bert_tokenizer._convert_id_to_token(_lowerCamelCase )
input_tokens.append(_lowerCamelCase )
_lowerCAmelCase : Any = add_sub_symbol(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : List[str] = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(_lowerCamelCase ):
if token[:2] == "##":
_lowerCAmelCase : List[Any] = token[2:]
# save chinese tokens' pos
if len(_lowerCamelCase ) == 1 and _is_chinese_char(ord(_lowerCamelCase ) ):
ref_id.append(_lowerCamelCase )
ref_ids.append(_lowerCamelCase )
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
return ref_ids
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
_lowerCAmelCase : int = f.readlines()
_lowerCAmelCase : int = [line.strip() for line in data if len(_lowerCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
_lowerCAmelCase : Dict = LTP(args.ltp ) # faster in GPU device
_lowerCAmelCase : Optional[int] = BertTokenizer.from_pretrained(args.bert )
_lowerCAmelCase : Optional[Any] = prepare_ref(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
_lowerCAmelCase : Any = [json.dumps(_lowerCamelCase ) + '\n' for ref in ref_ids]
f.writelines(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
_lowerCAmelCase = parser.parse_args()
main(args)
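# Example invocation (the script name is assumed; the paths match the defaults above):
#   python prepare_chinese_ref.py --file_name ./resources/chinese-demo.txt \
#       --ltp ./resources/ltp --bert ./resources/robert --save_path ./resources/ref.txt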
| 16
| 0
|
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def __UpperCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'-m' , '--pretrained_model_name_or_path' , type=lowerCamelCase_ , default=lowerCamelCase_ , required=lowerCamelCase_ , help='Path to pretrained model or model identifier from huggingface.co/models.' , )
parser.add_argument(
'-c' , '--caption' , type=lowerCamelCase_ , default='robotic cat with wings' , help='Text used to generate images.' , )
parser.add_argument(
    '-n' , '--images_num' , type=lowerCamelCase_ , default=4 , help='How many images to generate.' , )
parser.add_argument(
'-s' , '--seed' , type=lowerCamelCase_ , default=42 , help='Seed for random process.' , )
parser.add_argument(
'-ci' , '--cuda_id' , type=lowerCamelCase_ , default=0 , help='cuda_id.' , )
SCREAMING_SNAKE_CASE_ : Any = parser.parse_args()
return args
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : Tuple , lowerCamelCase_ : str ) -> Union[str, Any]:
"""simple docstring"""
    if len(lowerCamelCase_ ) != rows * cols:
        raise ValueError('The specified number of rows and columns does not match the number of images.' )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = imgs[0].size
SCREAMING_SNAKE_CASE_ : int = Image.new('RGB' , size=(cols * w, rows * h) )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = grid.size
for i, img in enumerate(lowerCamelCase_ ):
grid.paste(lowerCamelCase_ , box=(i % cols * w, i // cols * h) )
return grid
def __UpperCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : Union[str, Any]="robotic cat with wings" , lowerCamelCase_ : Optional[int]=7.5 , lowerCamelCase_ : Any=50 , lowerCamelCase_ : List[str]=1 , lowerCamelCase_ : Union[str, Any]=42 , ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = torch.Generator(pipeline.device ).manual_seed(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = pipeline(
lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , generator=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , ).images
SCREAMING_SNAKE_CASE_ : Dict = int(math.sqrt(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE_ : Optional[int] = image_grid(lowerCamelCase_ , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
UpperCamelCase__ : List[str] = parse_args()
# Load models and create wrapper for stable diffusion
UpperCamelCase__ : Tuple = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''')
UpperCamelCase__ : Dict = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''')
UpperCamelCase__ : Union[str, Any] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''')
UpperCamelCase__ : Any = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''')
UpperCamelCase__ : List[Any] = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
UpperCamelCase__ : int = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')):
UpperCamelCase__ : Any = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, '''unet''', unet)
else:
UpperCamelCase__ : Any = unet.to(torch.device('''cuda''', args.cuda_id))
UpperCamelCase__ : Union[str, Any] = pipeline.to(unet.device)
UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split()))))
UpperCamelCase__ : Optional[Any] = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
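# Note: the branch above swaps in an INT8 UNet quantized with Intel Neural Compressor
# when `best_model.pt` exists next to the pretrained weights; otherwise the FP32 UNet
# is moved to the requested CUDA device before images are generated.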
| 105
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
"""simple docstring"""
__A = StableDiffusionDiffEditPipeline
__A = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""height""", """width""", """image"""} | {"""image_latents"""}
__A = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"""image"""} | {"""image_latents"""}
__A = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__A = frozenset([] )
def __lowerCAmelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
snake_case_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCamelCase , )
snake_case_ = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=__UpperCamelCase , set_alpha_to_one=__UpperCamelCase , )
snake_case_ = DDIMInverseScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=__UpperCamelCase , set_alpha_to_zero=__UpperCamelCase , )
torch.manual_seed(0 )
snake_case_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
snake_case_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='gelu' , projection_dim=5_12 , )
snake_case_ = CLIPTextModel(__UpperCamelCase )
snake_case_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
snake_case_ = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase=0 ):
"""simple docstring"""
snake_case_ = floats_tensor((1, 16, 16) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
snake_case_ = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
if str(__UpperCamelCase ).startswith('mps' ):
snake_case_ = torch.manual_seed(__UpperCamelCase )
else:
snake_case_ = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
snake_case_ = {
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase=0 ):
"""simple docstring"""
snake_case_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
snake_case_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case_ = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert('RGB' )
if str(__UpperCamelCase ).startswith('mps' ):
snake_case_ = torch.manual_seed(__UpperCamelCase )
else:
snake_case_ = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
snake_case_ = {
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase=0 ):
"""simple docstring"""
snake_case_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
snake_case_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case_ = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert('RGB' )
if str(__UpperCamelCase ).startswith('mps' ):
snake_case_ = torch.manual_seed(__UpperCamelCase )
else:
snake_case_ = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
snake_case_ = {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def __lowerCAmelCase ( self ):
"""simple docstring"""
if not hasattr(self.pipeline_class , '_optional_components' ):
return
snake_case_ = self.get_dummy_components()
snake_case_ = self.pipeline_class(**__UpperCamelCase )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
snake_case_ = self.get_dummy_inputs(__UpperCamelCase )
snake_case_ = pipe(**__UpperCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__UpperCamelCase )
snake_case_ = self.pipeline_class.from_pretrained(__UpperCamelCase )
pipe_loaded.to(__UpperCamelCase )
pipe_loaded.set_progress_bar_config(disable=__UpperCamelCase )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__UpperCamelCase , __UpperCamelCase ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , )
snake_case_ = self.get_dummy_inputs(__UpperCamelCase )
snake_case_ = pipe_loaded(**__UpperCamelCase )[0]
snake_case_ = np.abs(output - output_loaded ).max()
self.assertLess(__UpperCamelCase , 1E-4 )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = 'cpu'
snake_case_ = self.get_dummy_components()
snake_case_ = self.pipeline_class(**__UpperCamelCase )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case_ = self.get_dummy_mask_inputs(__UpperCamelCase )
snake_case_ = pipe.generate_mask(**__UpperCamelCase )
snake_case_ = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
snake_case_ = np.array([0] * 9 )
snake_case_ = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__UpperCamelCase , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = 'cpu'
snake_case_ = self.get_dummy_components()
snake_case_ = self.pipeline_class(**__UpperCamelCase )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case_ = self.get_dummy_inversion_inputs(__UpperCamelCase )
snake_case_ = pipe.invert(**__UpperCamelCase ).images
snake_case_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
snake_case_ = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5_1050, 0.5015, 0.4407, 0.4799] , )
snake_case_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__UpperCamelCase , 1E-3 )
def __lowerCAmelCase ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = 'cpu'
snake_case_ = self.get_dummy_components()
snake_case_ = {'beta_start': 0.0_0085, 'beta_end': 0.012, 'beta_schedule': 'scaled_linear'}
snake_case_ = DPMSolverMultistepScheduler(**__UpperCamelCase )
snake_case_ = DPMSolverMultistepInverseScheduler(**__UpperCamelCase )
snake_case_ = self.pipeline_class(**__UpperCamelCase )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case_ = self.get_dummy_inversion_inputs(__UpperCamelCase )
snake_case_ = pipe.invert(**__UpperCamelCase ).images
snake_case_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
snake_case_ = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5_1050, 0.5015, 0.4407, 0.4799] , )
snake_case_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__UpperCamelCase , 1E-3 )
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def __lowerCAmelCase ( cls ):
"""simple docstring"""
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
snake_case_ = raw_image.convert('RGB' ).resize((7_68, 7_68) )
snake_case_ = raw_image
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = torch.manual_seed(0 )
snake_case_ = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa )
snake_case_ = DDIMScheduler.from_config(pipe.scheduler.config )
snake_case_ = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case_ = 'a bowl of fruit'
snake_case_ = 'a bowl of pears'
snake_case_ = pipe.generate_mask(
image=self.raw_image , source_prompt=__UpperCamelCase , target_prompt=__UpperCamelCase , generator=__UpperCamelCase , )
snake_case_ = pipe.invert(
prompt=__UpperCamelCase , image=self.raw_image , inpaint_strength=0.7 , generator=__UpperCamelCase ).latents
snake_case_ = pipe(
prompt=__UpperCamelCase , mask_image=__UpperCamelCase , image_latents=__UpperCamelCase , generator=__UpperCamelCase , negative_prompt=__UpperCamelCase , inpaint_strength=0.7 , output_type='numpy' , ).images[0]
snake_case_ = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5E-1
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = torch.manual_seed(0 )
snake_case_ = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1' , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa )
snake_case_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
snake_case_ = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__UpperCamelCase )
snake_case_ = 'a bowl of fruit'
snake_case_ = 'a bowl of pears'
snake_case_ = pipe.generate_mask(
image=self.raw_image , source_prompt=__UpperCamelCase , target_prompt=__UpperCamelCase , generator=__UpperCamelCase , )
snake_case_ = pipe.invert(
prompt=__UpperCamelCase , image=self.raw_image , inpaint_strength=0.7 , generator=__UpperCamelCase , num_inference_steps=25 , ).latents
snake_case_ = pipe(
prompt=__UpperCamelCase , mask_image=__UpperCamelCase , image_latents=__UpperCamelCase , generator=__UpperCamelCase , negative_prompt=__UpperCamelCase , inpaint_strength=0.7 , num_inference_steps=25 , output_type='numpy' , ).images[0]
snake_case_ = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 187
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
UpperCamelCase = None
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = '''▁'''
UpperCamelCase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''},
'''tokenizer_file''': {
'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'''
},
}
UpperCamelCase = {
'''google/pegasus-xsum''': 512,
}
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : List[Any] = VOCAB_FILES_NAMES
__snake_case : Any = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case : Any = PegasusTokenizer
__snake_case : str = ["input_ids", "attention_mask"]
def __init__( self: Tuple , UpperCAmelCase_: int=None , UpperCAmelCase_: str=None , UpperCAmelCase_: List[str]="<pad>" , UpperCAmelCase_: Dict="</s>" , UpperCAmelCase_: str="<unk>" , UpperCAmelCase_: Optional[int]="<mask_2>" , UpperCAmelCase_: Optional[int]="<mask_1>" , UpperCAmelCase_: List[Any]=None , UpperCAmelCase_: Union[str, Any]=103 , **UpperCAmelCase_: List[str] , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = offset
if additional_special_tokens is not None:
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
raise TypeError(
F'additional_special_tokens should be of type {type(UpperCAmelCase_ )}, but is'
F' {type(UpperCAmelCase_ )}' )
_SCREAMING_SNAKE_CASE = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
            # fill the remaining slots with <unk_...> placeholder tokens in case not all additional tokens are already taken
additional_special_tokens_extended += [
                F'<unk_{i}>' for i in range(len(additional_special_tokens_extended ) , self.offset - 1 )
]
if len(set(UpperCAmelCase_ ) ) != len(UpperCAmelCase_ ):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
F' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' )
_SCREAMING_SNAKE_CASE = additional_special_tokens_extended
else:
_SCREAMING_SNAKE_CASE = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F'<unk_{i}>' for i in range(2 , self.offset )]
super().__init__(
UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , mask_token_sent=UpperCAmelCase_ , offset=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , **UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE = vocab_file
_SCREAMING_SNAKE_CASE = False if not self.vocab_file else True
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
"""There should be 3 special tokens: mask_token, pad_token, and eos_token +"""
F' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}' )
return [1 if x in all_special_ids else 0 for x in seq]
def UpperCamelCase ( self: Any , UpperCAmelCase_: List , UpperCAmelCase_: Optional[List] = None , UpperCAmelCase_: bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(UpperCAmelCase_ )
elif token_ids_a is None:
return self._special_token_mask(UpperCAmelCase_ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def UpperCamelCase ( self: str , UpperCAmelCase_: int , UpperCAmelCase_: Union[str, Any]=None ):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_SCREAMING_SNAKE_CASE = os.path.join(
UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ):
copyfile(self.vocab_file , UpperCAmelCase_ )
return (out_vocab_file,)
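# Minimal usage sketch (class and checkpoint names assumed from the upstream library):
#   tokenizer = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#   ids = tokenizer("A long article to summarize.").input_ids  # ends with eos_token_id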
| 709
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class __UpperCAmelCase (_UpperCAmelCase ):
__snake_case : int = "ctrl"
__snake_case : Dict = ["past_key_values"]
__snake_case : List[str] = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self: Optional[Any] , UpperCAmelCase_: int=246_534 , UpperCAmelCase_: List[Any]=256 , UpperCAmelCase_: int=1_280 , UpperCAmelCase_: str=8_192 , UpperCAmelCase_: Optional[Any]=48 , UpperCAmelCase_: Optional[Any]=16 , UpperCAmelCase_: Optional[int]=0.1 , UpperCAmelCase_: Dict=0.1 , UpperCAmelCase_: Union[str, Any]=1E-6 , UpperCAmelCase_: Optional[Any]=0.02 , UpperCAmelCase_: Dict=True , **UpperCAmelCase_: str , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = n_positions
_SCREAMING_SNAKE_CASE = n_embd
_SCREAMING_SNAKE_CASE = n_layer
_SCREAMING_SNAKE_CASE = n_head
_SCREAMING_SNAKE_CASE = dff
_SCREAMING_SNAKE_CASE = resid_pdrop
_SCREAMING_SNAKE_CASE = embd_pdrop
_SCREAMING_SNAKE_CASE = layer_norm_epsilon
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = use_cache
super().__init__(**UpperCAmelCase_ )
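# Minimal usage sketch (the canonical upstream class name is CTRLConfig):
#   config = CTRLConfig(n_embd=1280, n_layer=48)
#   config.hidden_size  # resolves to n_embd through the attribute map defined above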
| 569
| 0
|
"""simple docstring"""
def a_ ( _lowerCAmelCase : int = 100_0000 ):
'''simple docstring'''
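    # Sieve the primes up to the limit, then apply Euler's product formula
    # phi(n) = n * prod over primes p dividing n of (1 - 1/p), and sum phi(2..limit).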
lowercase__ : List[Any] = set(range(3 , _lowerCAmelCase , 2 ) )
primes.add(2 )
for p in range(3 , _lowerCAmelCase , 2 ):
if p not in primes:
continue
        primes.difference_update(set(range(p * p , limit + 1 , p ) ) )
    lowercase__ : List[str] = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 599
|
"""simple docstring"""
class UpperCAmelCase_ :
def __init__( self , a , a , a ) -> List[Any]:
lowercase__ : List[str] = name
lowercase__ : List[str] = value
lowercase__ : Tuple = weight
def __repr__( self ) -> Any:
return f"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
def _UpperCAmelCase ( self ) -> Any:
return self.value
def _UpperCAmelCase ( self ) -> int:
return self.name
def _UpperCAmelCase ( self ) -> str:
return self.weight
def _UpperCAmelCase ( self ) -> int:
return self.value / self.weight
def a_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] ):
'''simple docstring'''
lowercase__ : Union[str, Any] = []
for i in range(len(_lowerCAmelCase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ):
'''simple docstring'''
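    # Greedy selection: sort items by the supplied key function (e.g. value density)
    # and take each item while the running weight stays within max_cost.
    # This is a heuristic and does not guarantee the optimal knapsack value.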
lowercase__ : Optional[Any] = sorted(_lowerCAmelCase , key=_lowerCAmelCase , reverse=_lowerCAmelCase )
lowercase__ : int = []
lowercase__ , lowercase__ : Dict = 0.0, 0.0
for i in range(len(_lowerCAmelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def a_ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 599
| 1
|
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ) -> Dict:
A = torch.nn.Linear(1_0 ,1_0 )
A = torch.optim.SGD(model.parameters() ,0.1 )
A = Accelerator()
A = accelerator.prepare(lowerCamelCase_ )
try:
pickle.loads(pickle.dumps(lowerCamelCase_ ) )
except Exception as e:
self.fail(f'Accelerated optimizer pickling failed with {e}' )
AcceleratorState._reset_state()
| 255
|
"""simple docstring"""
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowerCamelCase = (DDPMParallelScheduler,)
def UpperCamelCase__ ( self ,**lowerCamelCase_ ) -> List[Any]:
A = {
"""num_train_timesteps""": 1_0_0_0,
"""beta_start""": 0.00_01,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**lowerCamelCase_ )
return config
def UpperCamelCase__ ( self ) -> Tuple:
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def UpperCamelCase__ ( self ) -> Dict:
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] ,[0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCamelCase_ ,beta_end=lowerCamelCase_ )
def UpperCamelCase__ ( self ) -> List[Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCamelCase_ )
def UpperCamelCase__ ( self ) -> str:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowerCamelCase_ )
def UpperCamelCase__ ( self ) -> Any:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCamelCase_ )
def UpperCamelCase__ ( self ) -> int:
self.check_over_configs(thresholding=lowerCamelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCamelCase_ ,prediction_type=lowerCamelCase_ ,sample_max_value=lowerCamelCase_ ,)
def UpperCamelCase__ ( self ) -> Union[str, Any]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase_ )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=lowerCamelCase_ )
def UpperCamelCase__ ( self ) -> Optional[Any]:
A = self.scheduler_classes[0]
A = self.get_scheduler_config()
A = scheduler_class(**lowerCamelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_09_79 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1E-5
def UpperCamelCase__ ( self ) -> Optional[Any]:
A = self.scheduler_classes[0]
A = self.get_scheduler_config()
A = scheduler_class(**lowerCamelCase_ )
A = len(lowerCamelCase_ )
A = self.dummy_model()
A = self.dummy_sample_deter
A = self.dummy_sample_deter + 0.1
A = self.dummy_sample_deter - 0.1
A = samplea.shape[0]
A = torch.stack([samplea, samplea, samplea] ,dim=0 )
A = torch.arange(lowerCamelCase_ )[0:3, None].repeat(1 ,lowerCamelCase_ )
A = model(samples.flatten(0 ,1 ) ,timesteps.flatten(0 ,1 ) )
A = scheduler.batch_step_no_noise(lowerCamelCase_ ,timesteps.flatten(0 ,1 ) ,samples.flatten(0 ,1 ) )
A = torch.sum(torch.abs(lowerCamelCase_ ) )
A = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 11_53.18_33 ) < 1E-2
assert abs(result_mean.item() - 0.50_05 ) < 1E-3
def UpperCamelCase__ ( self ) -> Optional[int]:
A = self.scheduler_classes[0]
A = self.get_scheduler_config()
A = scheduler_class(**lowerCamelCase_ )
A = len(lowerCamelCase_ )
A = self.dummy_model()
A = self.dummy_sample_deter
A = torch.manual_seed(0 )
for t in reversed(range(lowerCamelCase_ ) ):
# 1. predict noise residual
A = model(lowerCamelCase_ ,lowerCamelCase_ )
# 2. predict previous mean of sample x_t-1
A = scheduler.step(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,generator=lowerCamelCase_ ).prev_sample
A = pred_prev_sample
A = torch.sum(torch.abs(lowerCamelCase_ ) )
A = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 2_58.96_06 ) < 1E-2
assert abs(result_mean.item() - 0.33_72 ) < 1E-3
def UpperCamelCase__ ( self ) -> int:
A = self.scheduler_classes[0]
A = self.get_scheduler_config(prediction_type="""v_prediction""" )
A = scheduler_class(**lowerCamelCase_ )
A = len(lowerCamelCase_ )
A = self.dummy_model()
A = self.dummy_sample_deter
A = torch.manual_seed(0 )
for t in reversed(range(lowerCamelCase_ ) ):
# 1. predict noise residual
A = model(lowerCamelCase_ ,lowerCamelCase_ )
# 2. predict previous mean of sample x_t-1
A = scheduler.step(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,generator=lowerCamelCase_ ).prev_sample
A = pred_prev_sample
A = torch.sum(torch.abs(lowerCamelCase_ ) )
A = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 2_02.02_96 ) < 1E-2
assert abs(result_mean.item() - 0.26_31 ) < 1E-3
def UpperCamelCase__ ( self ) -> Optional[Any]:
A = self.scheduler_classes[0]
A = self.get_scheduler_config()
A = scheduler_class(**lowerCamelCase_ )
A = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
A = scheduler.timesteps
for i, timestep in enumerate(lowerCamelCase_ ):
if i == len(lowerCamelCase_ ) - 1:
A = -1
else:
A = timesteps[i + 1]
A = scheduler.previous_timestep(lowerCamelCase_ )
A = prev_t.item()
self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ )
def UpperCamelCase__ ( self ) -> str:
A = self.scheduler_classes[0]
A = self.get_scheduler_config()
A = scheduler_class(**lowerCamelCase_ )
A = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(lowerCamelCase_ ,msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
def UpperCamelCase__ ( self ) -> str:
A = self.scheduler_classes[0]
A = self.get_scheduler_config()
A = scheduler_class(**lowerCamelCase_ )
A = [1_0_0, 8_7, 5_0, 1, 0]
A = len(lowerCamelCase_ )
with self.assertRaises(lowerCamelCase_ ,msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=lowerCamelCase_ ,timesteps=lowerCamelCase_ )
def UpperCamelCase__ ( self ) -> List[str]:
A = self.scheduler_classes[0]
A = self.get_scheduler_config()
A = scheduler_class(**lowerCamelCase_ )
A = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            lowerCamelCase_ ,msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" ,):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
| 255
| 1
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Tuple = '''SpeechT5FeatureExtractor'''
__lowercase : List[str] = '''SpeechT5Tokenizer'''
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> int:
super().__init__(__UpperCAmelCase ,__UpperCAmelCase )
def __call__( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> str:
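        # Route `audio`/`text` to encoder inputs and `audio_target`/`text_target` to
        # decoder labels; within each side the two modalities are mutually exclusive.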
lowerCAmelCase__ : List[str] = kwargs.pop("""audio""" ,__UpperCAmelCase )
lowerCAmelCase__ : Any = kwargs.pop("""text""" ,__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = kwargs.pop("""text_target""" ,__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = kwargs.pop("""audio_target""" ,__UpperCAmelCase )
lowerCAmelCase__ : Any = kwargs.pop("""sampling_rate""" ,__UpperCAmelCase )
if audio is not None and text is not None:
raise ValueError(
"""Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?""" )
if audio_target is not None and text_target is not None:
raise ValueError(
"""Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?""" )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
"""You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.""" )
if audio is not None:
lowerCAmelCase__ : Union[str, Any] = self.feature_extractor(__UpperCAmelCase ,*__UpperCAmelCase ,sampling_rate=__UpperCAmelCase ,**__UpperCAmelCase )
elif text is not None:
lowerCAmelCase__ : List[Any] = self.tokenizer(__UpperCAmelCase ,**__UpperCAmelCase )
else:
lowerCAmelCase__ : int = None
if audio_target is not None:
lowerCAmelCase__ : Tuple = self.feature_extractor(audio_target=__UpperCAmelCase ,*__UpperCAmelCase ,sampling_rate=__UpperCAmelCase ,**__UpperCAmelCase )
lowerCAmelCase__ : Tuple = targets["""input_values"""]
elif text_target is not None:
lowerCAmelCase__ : Tuple = self.tokenizer(__UpperCAmelCase ,**__UpperCAmelCase )
lowerCAmelCase__ : str = targets["""input_ids"""]
else:
lowerCAmelCase__ : Any = None
if inputs is None:
return targets
if targets is not None:
lowerCAmelCase__ : Dict = labels
lowerCAmelCase__ : Optional[Any] = targets.get("""attention_mask""" )
if decoder_attention_mask is not None:
lowerCAmelCase__ : List[str] = decoder_attention_mask
return inputs
def UpperCAmelCase_ ( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> int:
lowerCAmelCase__ : Union[str, Any] = kwargs.pop("""input_values""" ,__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = kwargs.pop("""input_ids""" ,__UpperCAmelCase )
lowerCAmelCase__ : str = kwargs.pop("""labels""" ,__UpperCAmelCase )
if input_values is not None and input_ids is not None:
raise ValueError("""Cannot process both `input_values` and `input_ids` inputs.""" )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
"""You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.""" )
if input_values is not None:
lowerCAmelCase__ : Tuple = self.feature_extractor.pad(__UpperCAmelCase ,*__UpperCAmelCase ,**__UpperCAmelCase )
elif input_ids is not None:
lowerCAmelCase__ : Optional[Any] = self.tokenizer.pad(__UpperCAmelCase ,**__UpperCAmelCase )
else:
lowerCAmelCase__ : Dict = None
if labels is not None:
if "input_ids" in labels or (isinstance(__UpperCAmelCase ,__UpperCAmelCase ) and "input_ids" in labels[0]):
lowerCAmelCase__ : str = self.tokenizer.pad(__UpperCAmelCase ,**__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = targets["""input_ids"""]
else:
lowerCAmelCase__ : int = self.feature_extractor.feature_size
lowerCAmelCase__ : Optional[Any] = self.feature_extractor.num_mel_bins
lowerCAmelCase__ : Dict = self.feature_extractor.pad(__UpperCAmelCase ,*__UpperCAmelCase ,**__UpperCAmelCase )
lowerCAmelCase__ : Tuple = feature_size_hack
lowerCAmelCase__ : int = targets["""input_values"""]
else:
lowerCAmelCase__ : Tuple = None
if inputs is None:
return targets
if targets is not None:
lowerCAmelCase__ : Optional[Any] = labels
lowerCAmelCase__ : Dict = targets.get("""attention_mask""" )
if decoder_attention_mask is not None:
lowerCAmelCase__ : Any = decoder_attention_mask
return inputs
def UpperCAmelCase_ ( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Optional[Any]:
return self.tokenizer.batch_decode(*__UpperCAmelCase ,**__UpperCAmelCase )
def UpperCAmelCase_ ( self ,*__UpperCAmelCase ,**__UpperCAmelCase ) -> Any:
return self.tokenizer.decode(*__UpperCAmelCase ,**__UpperCAmelCase )
| 565
|
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
_lowerCAmelCase = ''''''
_lowerCAmelCase = ''''''
_lowerCAmelCase = ''''''
_lowerCAmelCase = 1 # (0 is vertical, 1 is horizontal)
def _SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : int = get_dataset(UpperCamelCase , UpperCamelCase )
print("""Processing...""" )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = update_image_and_anno(UpperCamelCase , UpperCamelCase , UpperCamelCase )
for index, image in enumerate(UpperCamelCase ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
lowerCAmelCase__ : List[Any] = random_chars(32 )
lowerCAmelCase__ : Optional[Any] = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0]
lowerCAmelCase__ : Dict = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(f"""/{file_root}.jpg""" , UpperCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"""Success {index+1}/{len(UpperCamelCase )} with {file_name}""" )
lowerCAmelCase__ : Tuple = []
for anno in new_annos[index]:
lowerCAmelCase__ : Union[str, Any] = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(UpperCamelCase )
with open(f"""/{file_root}.txt""" , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : str = []
lowerCAmelCase__ : Tuple = []
for label_file in glob.glob(os.path.join(UpperCamelCase , """*.txt""" ) ):
lowerCAmelCase__ : Tuple = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(UpperCamelCase ) as in_file:
lowerCAmelCase__ : Any = in_file.readlines()
lowerCAmelCase__ : str = os.path.join(UpperCamelCase , f"""{label_name}.jpg""" )
lowerCAmelCase__ : Tuple = []
for obj_list in obj_lists:
lowerCAmelCase__ : Optional[int] = obj_list.rstrip("""\n""" ).split(""" """ )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(UpperCamelCase )
labels.append(UpperCamelCase )
return img_paths, labels
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase = 1 ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = []
lowerCAmelCase__ : Any = []
lowerCAmelCase__ : List[str] = []
for idx in range(len(UpperCamelCase ) ):
lowerCAmelCase__ : Optional[Any] = []
lowerCAmelCase__ : Optional[int] = img_list[idx]
path_list.append(UpperCamelCase )
lowerCAmelCase__ : List[Any] = anno_list[idx]
lowerCAmelCase__ : Dict = cva.imread(UpperCamelCase )
if flip_type == 1:
lowerCAmelCase__ : List[str] = cva.flip(UpperCamelCase , UpperCamelCase )
for bbox in img_annos:
lowerCAmelCase__ : Dict = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
lowerCAmelCase__ : Union[str, Any] = cva.flip(UpperCamelCase , UpperCamelCase )
for bbox in img_annos:
lowerCAmelCase__ : Any = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(UpperCamelCase )
new_imgs_list.append(UpperCamelCase )
return new_imgs_list, new_annos_lists, path_list
def _SCREAMING_SNAKE_CASE ( UpperCamelCase = 32 ):
"""simple docstring"""
    assert number_char > 1, "The number of characters should be greater than 1"
lowerCAmelCase__ : Tuple = ascii_lowercase + digits
return "".join(random.choice(UpperCamelCase ) for _ in range(UpperCamelCase ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 565
| 1
|
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
__snake_case: List[str] = logging.get_logger(__name__)
class _UpperCAmelCase ( lowerCAmelCase__ ):
"""simple docstring"""
a_ = ["pixel_values"]
def __init__( self , lowerCAmelCase_ = True , lowerCAmelCase_ = 1 / 2_55 , lowerCAmelCase_ = True , lowerCAmelCase_ = 8 , **lowerCAmelCase_ , ):
'''simple docstring'''
super().__init__(**lowerCAmelCase_ )
a_ : Optional[Any] = do_rescale
a_ : Any = rescale_factor
a_ : Optional[int] = do_pad
a_ : Union[str, Any] = pad_size
def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ ):
'''simple docstring'''
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None ):
'''simple docstring'''
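        # Symmetrically pad the bottom and right edges so height and width become
        # multiples of `size`; note the formula adds a full extra block even when
        # a side is already a multiple.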
a_ : Any = get_image_size(lowerCAmelCase_ )
a_ : Union[str, Any] = (old_height // size + 1) * size - old_height
a_ : str = (old_width // size + 1) * size - old_width
return pad(lowerCAmelCase_ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=lowerCAmelCase_ )
def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = ChannelDimension.FIRST , **lowerCAmelCase_ , ):
'''simple docstring'''
a_ : Dict = do_rescale if do_rescale is not None else self.do_rescale
a_ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
a_ : str = do_pad if do_pad is not None else self.do_pad
a_ : Optional[Any] = pad_size if pad_size is not None else self.pad_size
a_ : Dict = make_list_of_images(lowerCAmelCase_ )
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
a_ : Any = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_rescale:
a_ : Optional[int] = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images]
if do_pad:
a_ : Any = [self.pad(lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images]
a_ : Dict = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
a_ : Union[str, Any] = {"""pixel_values""": images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
| 707
|
'''simple docstring'''
def _snake_case ( A_ : list ):
"""simple docstring"""
if len(A_ ) <= 1:
return [tuple(A_ )]
a_ : List[Any] = []
def generate(A_ : int , A_ : list ):
a_ : List[Any] = [0] * n
res.append(tuple(A_ ) )
a_ : List[str] = 0
while i < n:
if c[i] < i:
if i % 2 == 0:
a_ , a_ : Union[str, Any] = arr[i], arr[0]
else:
a_ , a_ : List[str] = arr[i], arr[c[i]]
res.append(tuple(A_ ) )
c[i] += 1
a_ : Optional[Any] = 0
else:
a_ : Union[str, Any] = 0
i += 1
generate(len(A_ ) , A_ )
return res
if __name__ == "__main__":
__snake_case: Dict = input("Enter numbers separated by a comma:\n").strip()
__snake_case: Optional[Any] = [int(item) for item in user_input.split(",")]
print(heaps(arr))
| 460
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
'BAAI/AltCLIP': 'https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class A_ ( __lowercase ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : Optional[Any] = "altclip_text_model"
def __init__( self , _A=250002 , _A=1024 , _A=24 , _A=16 , _A=4096 , _A="gelu" , _A=0.1 , _A=0.1 , _A=514 , _A=1 , _A=0.02 , _A=0.02 , _A=1e-05 , _A=1 , _A=0 , _A=2 , _A="absolute" , _A=True , _A=768 , **_A , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A)
_UpperCAmelCase : Optional[Any] = vocab_size
_UpperCAmelCase : int = hidden_size
_UpperCAmelCase : Tuple = num_hidden_layers
_UpperCAmelCase : Optional[int] = num_attention_heads
_UpperCAmelCase : str = hidden_act
_UpperCAmelCase : Dict = intermediate_size
_UpperCAmelCase : int = hidden_dropout_prob
_UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
_UpperCAmelCase : Tuple = max_position_embeddings
_UpperCAmelCase : Tuple = type_vocab_size
_UpperCAmelCase : List[str] = initializer_range
_UpperCAmelCase : int = initializer_factor
_UpperCAmelCase : List[Any] = layer_norm_eps
_UpperCAmelCase : List[str] = position_embedding_type
_UpperCAmelCase : Union[str, Any] = use_cache
_UpperCAmelCase : int = project_dim
class A_ ( __lowercase ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : Dict = "altclip_vision_model"
def __init__( self , _A=768 , _A=3072 , _A=512 , _A=12 , _A=12 , _A=3 , _A=224 , _A=32 , _A="quick_gelu" , _A=1e-5 , _A=0.0 , _A=0.02 , _A=1.0 , **_A , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**_A)
_UpperCAmelCase : Union[str, Any] = hidden_size
_UpperCAmelCase : Any = intermediate_size
_UpperCAmelCase : int = projection_dim
_UpperCAmelCase : Union[str, Any] = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : int = num_channels
_UpperCAmelCase : int = patch_size
_UpperCAmelCase : Dict = image_size
_UpperCAmelCase : str = initializer_range
_UpperCAmelCase : Any = initializer_factor
_UpperCAmelCase : Optional[Any] = attention_dropout
_UpperCAmelCase : Optional[Any] = layer_norm_eps
_UpperCAmelCase : Union[str, Any] = hidden_act
@classmethod
def snake_case__ ( cls , _A , **_A) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(_A)
_UpperCAmelCase : Union[str, Any] = cls.get_config_dict(_A , **_A)
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get('''model_type''') == "altclip":
_UpperCAmelCase : Optional[Any] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
return cls.from_dict(_A , **_A)
class A_ ( __lowercase ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : Tuple = "altclip"
_SCREAMING_SNAKE_CASE : Optional[Any] = True
def __init__( self , _A=None , _A=None , _A=768 , _A=2.6592 , **_A) -> Dict:
"""simple docstring"""
_UpperCAmelCase : Tuple = kwargs.pop('''text_config_dict''' , _A)
_UpperCAmelCase : int = kwargs.pop('''vision_config_dict''' , _A)
super().__init__(**_A)
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
_UpperCAmelCase : Optional[Any] = {}
# This is the complete result when using `text_config_dict`.
_UpperCAmelCase : Optional[Any] = AltCLIPTextConfig(**_A).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
_UpperCAmelCase : Union[str, Any] = (
f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
f'''The value `text_config_dict[\"{key}\"]` will be used instead.'''
)
# If inferred from default argument values (just to be super careful)
else:
_UpperCAmelCase : int = (
f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
f'''value `text_config[\"{key}\"]` will be overriden.'''
)
logger.warning(_A)
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict)
if vision_config_dict is not None:
if vision_config is None:
_UpperCAmelCase : int = {}
# This is the complete result when using `vision_config_dict`.
_UpperCAmelCase : Tuple = AltCLIPVisionConfig(**_A).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
_UpperCAmelCase : Optional[Any] = {
str(_A): value for key, value in _vision_config_dict['''id2label'''].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
_UpperCAmelCase : str = (
f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
f'''values. The value `vision_config_dict[\"{key}\"]` will be used instead.'''
)
# If inferred from default argument values (just to be super careful)
else:
_UpperCAmelCase : List[Any] = (
f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
f'''The value `vision_config[\"{key}\"]` will be overriden.'''
)
logger.warning(_A)
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict)
if text_config is None:
_UpperCAmelCase : Any = {}
logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''')
if vision_config is None:
_UpperCAmelCase : Dict = {}
            logger.info('''`vision_config` is `None`. Initializing the `AltCLIPVisionConfig` with default values.''')
_UpperCAmelCase : Union[str, Any] = AltCLIPTextConfig(**_A)
_UpperCAmelCase : str = AltCLIPVisionConfig(**_A)
_UpperCAmelCase : str = projection_dim
_UpperCAmelCase : Any = logit_scale_init_value
_UpperCAmelCase : Union[str, Any] = 1.0
@classmethod
def snake_case__ ( cls , _A , _A , **_A) -> List[str]:
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_A)
def snake_case__ ( self) -> str:
"""simple docstring"""
_UpperCAmelCase : Tuple = copy.deepcopy(self.__dict__)
_UpperCAmelCase : int = self.text_config.to_dict()
_UpperCAmelCase : Any = self.vision_config.to_dict()
_UpperCAmelCase : Any = self.__class__.model_type
return output
| 485
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase):
a_ = StableDiffusionInstructPixaPixPipeline
a_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
a_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
a_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
a_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def A ( self : str ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
UpperCAmelCase_ : Any = PNDMScheduler(skip_prk_steps=_A )
torch.manual_seed(0 )
UpperCAmelCase_ : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase_ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
UpperCAmelCase_ : List[str] = CLIPTextModel(_A )
UpperCAmelCase_ : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase_ : Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def A ( self : List[str] , _A : Tuple , _A : str=0 ) -> int:
UpperCAmelCase_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ : Optional[Any] = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase_ : str = torch.manual_seed(_A )
else:
UpperCAmelCase_ : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase_ : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''image_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def A ( self : Tuple ) -> List[Any]:
UpperCAmelCase_ : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Any = self.get_dummy_components()
UpperCAmelCase_ : int = StableDiffusionInstructPixaPixPipeline(**_A )
UpperCAmelCase_ : Tuple = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : str = self.get_dummy_inputs(_A )
UpperCAmelCase_ : Union[str, Any] = sd_pipe(**_A ).images
UpperCAmelCase_ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ : Union[str, Any] = np.array([0.7_526, 0.3_750, 0.4_547, 0.6_117, 0.5_866, 0.5_016, 0.4_327, 0.5_642, 0.4_815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A ( self : int ) -> str:
UpperCAmelCase_ : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : List[str] = self.get_dummy_components()
UpperCAmelCase_ : Dict = StableDiffusionInstructPixaPixPipeline(**_A )
UpperCAmelCase_ : str = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Any = self.get_dummy_inputs(_A )
UpperCAmelCase_ : List[Any] = '''french fries'''
UpperCAmelCase_ : Optional[Any] = sd_pipe(**_A , negative_prompt=_A )
UpperCAmelCase_ : Tuple = output.images
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ : Dict = np.array([0.7_511, 0.3_642, 0.4_553, 0.6_236, 0.5_797, 0.5_013, 0.4_343, 0.5_611, 0.4_831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A ( self : Dict ) -> str:
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['''prompt'''] = [inputs['''prompt''']] * 2
        image = np.array(inputs['''image'''] ).astype(np.float32 ) / 255.0
        image = torch.from_numpy(image ).unsqueeze(0 ).to(device )
        image = image / 2 + 0.5
        image = image.permute(0 , 3 , 1 , 2 )
        inputs['''image'''] = image.repeat(2 , 1 , 1 , 1 )
        image = sd_pipe(**inputs ).images
        image_slice = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5_812, 0.5_748, 0.5_222, 0.5_908, 0.5_695, 0.7_174, 0.6_804, 0.5_523, 0.5_579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A ( self : List[str] ) -> List[Any]:
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['''scheduler'''] = EulerAncestralDiscreteScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
        sd_pipe = StableDiffusionInstructPixaPixPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        slice = [round(x , 4 ) for x in image_slice.flatten().tolist()]
        print(''','''.join([str(x ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7_417, 0.3_842, 0.4_732, 0.5_776, 0.5_891, 0.5_139, 0.4_052, 0.5_673, 0.4_986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A ( self : str ) -> List[str]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def A ( self : List[Any] ) -> Optional[Any]:
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPixaPixPipeline(**components )
        pipe.image_processor = VaeImageProcessor(do_resize=False , do_normalize=False )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        out = pipe(**self.get_dummy_inputs_by_type(torch_device , input_image_type='''pt''' ) )[0]
        vae = components['''vae''']
        inputs = self.get_dummy_inputs_by_type(torch_device , input_image_type='''pt''' )
        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param] ).latent_dist.mode()
        out_latents_inputs = pipe(**inputs )[0]
        max_diff = np.abs(out - out_latents_inputs ).max()
        self.assertLess(max_diff , 1e-4 , '''passing latents as image input generate different result from passing image''' )
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase):
def A ( self : Any ) -> Optional[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self , seed=0 ) -> List[Any]:
        generator = torch.manual_seed(seed )
        image = load_image(
            '''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
        inputs = {
'''prompt''': '''turn him into a cyborg''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''image_guidance_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def A ( self : Optional[int] ) -> Optional[Any]:
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''' , safety_checker=None )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.5_902, 0.6_015, 0.6_027, 0.5_983, 0.6_092, 0.6_061, 0.5_765, 0.5_785, 0.5_555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def A ( self : Optional[Any] ) -> Optional[Any]:
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''' , safety_checker=None )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.6_578, 0.6_817, 0.6_972, 0.6_761, 0.6_856, 0.6_916, 0.6_428, 0.6_516, 0.6_301] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def A ( self : List[str] ) -> Union[str, Any]:
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''' , safety_checker=None )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.3_828, 0.3_834, 0.3_818, 0.3_792, 0.3_865, 0.3_752, 0.3_792, 0.3_847, 0.3_753] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def A ( self : Tuple ) -> List[Any]:
        number_of_steps = 0
        def callback_fn(step : int , timestep : int , latents : torch.FloatTensor ) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2_463, -0.4_644, -0.9_756, 1.5_176, 1.4_414, 0.7_866, 0.9_897, 0.8_521, 0.7_983] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2_644, -0.4_626, -0.9_653, 1.5_176, 1.4_551, 0.7_686, 0.9_805, 0.8_452, 0.8_115] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
        callback_fn.has_been_called = False
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''' , safety_checker=None , torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs , callback=callback_fn , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def A ( self : int ) -> int:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''' , safety_checker=None , torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs )
        mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def A ( self : str ) -> int:
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs['''image'''] = inputs['''image'''].resize((5_04, 5_04) )
        model_id = '''timbrooks/instruct-pix2pix'''
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            model_id , safety_checker=None , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        output = pipe(**inputs )
        image = output.images[0]
        image_slice = image[2_55:2_58, 3_83:3_86, -1]
        assert image.shape == (5_04, 5_04, 3)
        expected_slice = np.array([0.2_726, 0.2_529, 0.2_664, 0.2_655, 0.2_641, 0.2_642, 0.2_591, 0.2_649, 0.2_590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 541
| 0
|
"""simple docstring"""
import torch
def main() -> None:
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f'Successfully ran on {num_gpus} GPUs' )
if __name__ == "__main__":
    main()
| 704
|
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
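# Note on the mapping above: the "*" in the HF-side names is a placeholder for
# the encoder layer index; it is filled in below (see the `mapped_key.replace`
# call) once a matching fairseq key reveals which layer the weight belongs to.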
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer , key , value , full_name , weight_type ) ->str:
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer, attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights_wavaveca(fairseq_model , hf_model ) ->Union[str, Any]:
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == """group""", )
            is_used = True
        elif any(x in name for x in ["""adaptor""", """w2v_encoder.proj.""", """w2v_proj_ln."""] ):
            load_adapter(name, value, adapter, unused_weights )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""", layer_index )
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        weight_type = """weight"""
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type )
                    continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'Unused weights: {unused_weights}' )
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ) ->List[Any]:
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name )
def load_adapter(full_name , value , adapter , unused_weights ) ->str:
    name = full_name.split("""adaptor.""" )[-1]
    items = name.split(""".""" )
    if items[1].isdigit():
        layer_id = int(items[1] )
    else:
        layer_id = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'
                adapter.proj_layer_norm.bias.data = value
                logger.info(f'Adapter proj layer norm bias was initialized from {full_name}.' )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f'{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'
                adapter.proj.bias.data = value
                logger.info(f'Adapter proj layer bias was initialized from {full_name}.' )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f'{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'
                adapter.proj.weight.data = value
                logger.info(f'Adapter proj layer weight was initialized from {full_name}.' )
    elif isinstance(layer_id , int ):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f'Adapter layer {layer_id} bias was initialized from {full_name}.' )
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f'Adapter layer {layer_id} weight was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name )
def make_linear_from_emb(emb ) ->List[Any]:
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
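# A minimal sketch of what `make_linear_from_emb` yields (values illustrative):
#   emb = nn.Embedding(250_054, 1_024)
#   lm_head = make_linear_from_emb(emb)   # bias-free nn.Linear sharing emb.weight
#   # so decoder logits can be computed as hidden_states @ emb.weight.T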
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path , pytorch_dump_folder_path , dict_path , config_yaml_path , encoder_config_path , decoder_config_path , add_adapter , adapter_kernel_size , adapter_stride , decoder_start_token_id , encoder_output_dim , ) ->str:
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path, add_adapter=add_adapter, adapter_stride=adapter_stride, adapter_kernel_size=adapter_kernel_size, use_auth_token=True, output_hidden_size=encoder_output_dim, )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path )
    # load model
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={
            """config_yaml""": config_yaml_path,
            """data""": """/""".join(dict_path.split("""/""" )[:-1] ),
            """w2v_path""": checkpoint_path,
            """load_pretrained_decoder_from""": None,
        }, )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True )
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    recursively_load_weights_wavaveca(model.encoder, hf_encoder )
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config )
    missing_keys , unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False )
    logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}' )
    logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False
    tokenizer = MBartaaTokenizer(dict_path )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wavavec.config.to_dict()
    config["""pad_token_id"""] = tokenizer.pad_token_id
    config["""bos_token_id"""] = tokenizer.bos_token_id
    config["""eos_token_id"""] = tokenizer.eos_token_id
    config["""tokenizer_class"""] = """mbart50"""
    config["""feature_extractor_type"""] = """wav2vec2"""
    config["""decoder_start_token_id"""] = tokenizer.eos_token_id
    config["""forced_bos_token_id"""] = 2_5_0_0_0_4
    config["""forced_eos_token_id"""] = tokenizer.eos_token_id
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-xls-r-1b''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/mbart-large-50-one-to-many-mmt''',
type=str,
help='''Path to hf decoder checkpoint config''',
)
    parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whether to add model adapter layers''')
parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''')
parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''')
parser.add_argument('''--encoder_output_dim''', default=1024, type=int, help='''encoder output dim''')
parser.add_argument('''--start_token_id''', default=25_0004, type=int, help='''`decoder_start_token_id` of model config''')
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
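# Example invocation (the script name and all paths below are illustrative only):
#   python convert_wav2vec2_mbart50_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --dict_path /path/to/dict.txt \
#       --config_yaml_path /path/to/config.yaml \
#       --pytorch_dump_folder_path ./wav2vec2-mbart50-converted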
| 498
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/rembert': 256,
}
class RemBertTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=True , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ) -> Union[str, Any]:
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ) -> int:
        return len(self.sp_model )
    def get_vocab( self ) -> Optional[Any]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> Tuple:
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self , d ) -> Union[str, Any]:
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text , sample=False ) -> Union[str, Any]:
        pieces = self.sp_model.EncodeAsPieces(text )
        return pieces
    def _convert_token_to_id( self , token ) -> List[Any]:
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ) -> Optional[int]:
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ) -> Tuple:
        out_string = self.sp_model.decode_pieces(tokens )
        return out_string
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_b + sep
    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_b is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model.""" )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_b is not None:
            return [1] + ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error("""Vocabulary path ({}) should be a directory""".format(save_directory ) )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
return (out_vocab_file,)
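# Usage sketch for the tokenizer above (assumes the "google/rembert" checkpoint
# is reachable; the call below is illustrative):
#   tokenizer = RemBertTokenizer.from_pretrained("google/rembert")
#   ids = tokenizer("Hello world")["input_ids"]  # [CLS] ... [SEP] per the methods above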
| 433
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json',
}
class GitVisionConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "git_vision_model"
    def __init__( self , hidden_size=768 , intermediate_size=3072 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=224 , patch_size=16 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.0_2 , **kwargs , ) -> Dict:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("""model_type""" ) == "git":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class GitConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "git"
    def __init__( self , vision_config=None , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=6 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , tie_word_embeddings=False , bos_token_id=101 , eos_token_id=102 , num_image_with_embedding=None , **kwargs , ) -> Tuple:
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , pad_token_id=pad_token_id , **kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
        self.vision_config = GitVisionConfig(**vision_config )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict( self ) -> List[Any]:
        output = copy.deepcopy(self.__dict__ )
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
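# Minimal construction sketch (values illustrative, mirroring the defaults above):
#   vision_config = GitVisionConfig(hidden_size=768, num_hidden_layers=12)
#   config = GitConfig(vision_config=vision_config.to_dict(), vocab_size=3_0522)
#   # to_dict() round-trips the nested vision config, as implemented above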
| 433
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_swiftformer"""] = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
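# The `_LazyModule` swap above defers importing the heavy torch-backed modules:
# attribute access on this package triggers the real import at that moment,
# while the TYPE_CHECKING branch keeps static type checkers fully informed.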
| 717
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_nezha"""] = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672
| 0
|
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
'''debug''': logging.DEBUG,
'''info''': logging.INFO,
'''warning''': logging.WARNING,
'''error''': logging.ERROR,
'''critical''': logging.CRITICAL,
}
_default_log_level = logging.WARNING
def _get_default_logging_level( ):
    """simple docstring"""
    env_level_str = os.getenv('DATASETS_VERBOSITY' , None )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"""Unknown option DATASETS_VERBOSITY={env_level_str}, """
                f"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
    return _default_log_level
def _get_library_name( ):
"""simple docstring"""
return __name__.split('.' )[0]
def _get_library_root_logger( ):
"""simple docstring"""
return logging.getLogger(_get_library_name() )
def _configure_library_root_logger( ):
    """simple docstring"""
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level() )
def _reset_library_root_logger( ):
    """simple docstring"""
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET )
def get_logger( SCREAMING_SNAKE_CASE: Optional[str] = None ):
    """simple docstring"""
    if SCREAMING_SNAKE_CASE is None:
        SCREAMING_SNAKE_CASE = _get_library_name()
    return logging.getLogger(SCREAMING_SNAKE_CASE )
def get_verbosity( ):
"""simple docstring"""
return _get_library_root_logger().getEffectiveLevel()
def set_verbosity( SCREAMING_SNAKE_CASE: int ):
"""simple docstring"""
_get_library_root_logger().setLevel(SCREAMING_SNAKE_CASE )
def set_verbosity_info( ):
    """simple docstring"""
    return set_verbosity(INFO )
def set_verbosity_warning( ):
    """simple docstring"""
    return set_verbosity(WARNING )
def set_verbosity_debug( ):
    """simple docstring"""
    return set_verbosity(DEBUG )
def set_verbosity_error( ):
    """simple docstring"""
    return set_verbosity(ERROR )
def disable_propagation( ):
    """simple docstring"""
    _get_library_root_logger().propagate = False
def enable_propagation( ):
    """simple docstring"""
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
'''simple docstring'''
    def __init__( self : Optional[Any] , *args : str , **kwargs : str ) -> str:  # pylint: disable=unused-argument
        """simple docstring"""
        self._iterator = args[0] if args else None
def __iter__( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
return iter(self._iterator )
def __getattr__( self : List[str] , UpperCAmelCase_ : Tuple ) -> str:
"""simple docstring"""
        def empty_fn(*args : Tuple , **kwargs : Tuple ):  # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : int ) -> Optional[int]:
"""simple docstring"""
return self
def __exit__( self : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict ) -> Dict:
"""simple docstring"""
return
_tqdm_active = True
class _tqdm_cls:
'''simple docstring'''
    def __call__( self : List[Any] , *args : Any , disable : Any=False , **kwargs : Dict ) -> List[Any]:
        """simple docstring"""
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )
    def set_lock( self : Optional[Any] , *args : Any , **kwargs : Tuple ) -> Dict:
        """simple docstring"""
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )
    def get_lock( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled( ):
"""simple docstring"""
global _tqdm_active
return bool(_tqdm_active )
def enable_progress_bar( ):
    """simple docstring"""
    global _tqdm_active
    _tqdm_active = True
def disable_progress_bar( ):
    """simple docstring"""
    global _tqdm_active
    _tqdm_active = False
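# Typical client-side use of this module (a sketch of the helpers above):
#   set_verbosity_error()      # silence info/warning output from the library
#   disable_progress_bar()     # route tqdm calls to the no-op EmptyTqdm above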
| 580
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape( tensor_list ):
    """simple docstring"""
    shapes = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class _SCREAMING_SNAKE_CASE ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] = StableDiffusionLatentUpscalePipeline
SCREAMING_SNAKE_CASE_: Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"height",
"width",
"cross_attention_kwargs",
"negative_prompt_embeds",
"prompt_embeds",
}
SCREAMING_SNAKE_CASE_: Optional[Any] = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
SCREAMING_SNAKE_CASE_: Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE_: Optional[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
SCREAMING_SNAKE_CASE_: Optional[int] = frozenset([] )
SCREAMING_SNAKE_CASE_: List[str] = True
@property
    def dummy_image( self ) -> Tuple:
        """simple docstring"""
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image
    def get_dummy_components( self ) -> Optional[int]:
        """simple docstring"""
        torch.manual_seed(0 )
        model = UNetaDConditionModel(
            act_fn='gelu' , attention_head_dim=8 , norm_num_groups=None , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
            'KDownBlock2D',
            'KCrossAttnDownBlock2D',
            'KCrossAttnDownBlock2D',
            'KCrossAttnDownBlock2D',
            ) , in_channels=8 , mid_block_type=None , only_cross_attention=False , out_channels=5 , resnet_time_scale_shift='scale_shift' , time_embedding_type='fourier' , timestep_post_act='gelu' , up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D') , )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
            'DownEncoderBlock2D',
            'DownEncoderBlock2D',
            'DownEncoderBlock2D',
            'DownEncoderBlock2D',
            ] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        scheduler = EulerDiscreteScheduler(prediction_type='sample' )
        text_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='quick_gelu' , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
'unet': model.eval(),
'vae': vae.eval(),
'scheduler': scheduler,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
    def get_dummy_inputs( self , device , seed=0 ) -> Optional[Any]:
        """simple docstring"""
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'image': self.dummy_image.cpu(),
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 256, 256, 3) )
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1E-3 )
def __lowerCamelCase ( self : str ) -> int:
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def __lowerCamelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def __lowerCamelCase ( self : int ) -> List[str]:
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __lowerCamelCase ( self : Dict ) -> int:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def __lowerCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def __lowerCamelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
super().test_save_load_local(expected_max_difference=3E-3 )
def __lowerCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def __lowerCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
        skip_schedulers = [
            'DDIMScheduler',
            'DDPMScheduler',
            'PNDMScheduler',
            'HeunDiscreteScheduler',
            'EulerAncestralDiscreteScheduler',
            'KDPM2DiscreteScheduler',
            'KDPM2AncestralDiscreteScheduler',
            'DPMSolverSDEScheduler',
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue
            scheduler_cls = getattr(diffusers , scheduler_enum.name )
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config )
            output = pipe(**inputs )[0]
            outputs.append(output )
        assert check_same_shape(outputs )
@require_torch_gpu
@slow
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __lowerCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
        generator = torch.manual_seed(33 )
        pipe = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' , torch_dtype=torch.float16 )
        pipe.to('cuda' )
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.float16 )
        upscaler.to('cuda' )
        prompt = 'a photo of an astronaut high resolution, unreal engine, ultra realistic'
        low_res_latents = pipe(prompt , generator=generator , output_type='latent' ).images
        image = upscaler(
            prompt=prompt , image=low_res_latents , num_inference_steps=20 , guidance_scale=0 , generator=generator , output_type='np' , ).images[0]
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy' )
        assert np.abs((expected_image - image).mean() ) < 5E-2
def __lowerCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
        generator = torch.manual_seed(33 )
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.float16 )
        upscaler.to('cuda' )
        prompt = 'the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'
        image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png' )
        image = upscaler(
            prompt=prompt , image=image , num_inference_steps=20 , guidance_scale=0 , generator=generator , output_type='np' , ).images[0]
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy' )
        assert np.abs((expected_image - image).max() ) < 5E-2
| 580
| 1
|
'''simple docstring'''
import os
import sys
import transformers
os.environ['''TF_CPP_MIN_LOG_LEVEL'''] = '''3'''
print('''Python version:''', sys.version)
print('''transformers version:''', transformers.__version__)
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
print('''NCCL version:''', torch.cuda.nccl.version())
except ImportError:
print('''Torch version:''', None)
try:
import deepspeed
print('''DeepSpeed version:''', deepspeed.__version__)
except ImportError:
print('''DeepSpeed version:''', None)
try:
import tensorflow as tf
print('''TensorFlow version:''', tf.__version__)
print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU''')))
print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU''')))
except ImportError:
print('''TensorFlow version:''', None)
| 708
|
'''simple docstring'''
from collections.abc import Generator
def UpperCAmelCase_ ( ):
'''simple docstring'''
    a , b = 0, 1
while True:
        a , b = b, a + b
yield b
def UpperCAmelCase_ ( lowercase__ = 1_0_0_0 ):
'''simple docstring'''
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen ) ) ) < lowercase__:
answer += 1
return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
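# Worked example (hand-checked): the generator above yields 1, 2, 3, 5, 8, ...,
# skipping the leading 1, so for n = 3 the loop counts the 10 terms below 100
# and solution(3) returns 12 -- and indeed F(12) = 144 is the first 3-digit term.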
| 41
| 0
|
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module ):
    out_channels: int
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        self.conv = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self , hidden_states ):
        batch , height , width , channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states , shape=(batch, height * 2, width * 2, channels) , method='''nearest''' , )
        hidden_states = self.conv(hidden_states )
        return hidden_states
class FlaxDownsample2D(nn.Module ):
    out_channels: int
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        self.conv = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self , hidden_states ):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states )
        return hidden_states
class FlaxResnetBlock2D(nn.Module ):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels
        self.norm1 = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
        self.conv1 = nn.Conv(
            out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        self.time_emb_proj = nn.Dense(out_channels , dtype=self.dtype )
        self.norm2 = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
        self.dropout = nn.Dropout(self.dropout_prob )
        self.conv2 = nn.Conv(
            out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels , kernel_size=(1, 1) , strides=(1, 1) , padding='''VALID''' , dtype=self.dtype , )
    def __call__( self , hidden_states , temb , deterministic=True ):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states )
        hidden_states = nn.swish(hidden_states )
        hidden_states = self.conv1(hidden_states )
        temb = self.time_emb_proj(nn.swish(temb ) )
        temb = jnp.expand_dims(jnp.expand_dims(temb , 1 ) , 1 )
        hidden_states = hidden_states + temb
        hidden_states = self.norm2(hidden_states )
        hidden_states = nn.swish(hidden_states )
        hidden_states = self.dropout(hidden_states , deterministic )
        hidden_states = self.conv2(hidden_states )
        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual )
        return hidden_states + residual
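# Shape sketch for the resnet block above (Flax convs are NHWC; values illustrative):
#   block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
#   hidden_states: (batch, H, W, 32), temb: (batch, time_embed_dim)
#   output: (batch, H, W, 64) -- the residual is routed through the 1x1 "nin"
#   shortcut conv exactly when in_channels != out_channels.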
| 32
|
from __future__ import annotations
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
    num_str = str(SCREAMING_SNAKE_CASE_ )
    return len(num_str ) == 9 and set(num_str ) == set('''123456789''' )
def A__ ( ) -> int | None:
"""simple docstring"""
for base_num in range(99_99 , 49_99 , -1 ):
        candidate = 10_00_02 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    for base_num in range(3_33 , 99 , -1 ):
        candidate = 1_00_20_03 * base_num
        if is_9_pandigital(candidate ):
            return candidate
return None
if __name__ == "__main__":
print(f'''{solution() = }''')
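# Why 10_00_02 and 1_00_20_03 (Project Euler 38): for a 4-digit n whose double
# has 5 digits, n * 100_002 == n * 10**5 + 2 * n, i.e. the concatenation n||2n;
# for a 3-digit n, n * 1_002_003 concatenates n||2n||3n. Both give 9-digit candidates.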
| 32
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""",
}
class FocalNetConfig( BackboneConfigMixin , PretrainedConfig ):
    '''simple docstring'''
    model_type = '''focalnet'''
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , use_conv_embed=False , hidden_sizes=[192, 384, 768, 768] , depths=[2, 2, 6, 2] , focal_levels=[2, 2, 2, 2] , focal_windows=[3, 3, 3, 3] , hidden_act="gelu" , mlp_ratio=4.0 , hidden_dropout_prob=0.0 , drop_path_rate=0.1 , use_layerscale=False , layerscale_value=1e-4 , use_post_layernorm=False , use_post_layernorm_in_modulation=False , normalize_modulator=False , initializer_range=0.02 , layer_norm_eps=1e-5 , encoder_stride=32 , out_features=None , out_indices=None , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
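# Minimal sketch (illustrative values) of building the config above:
#   config = FocalNetConfig(embed_dim=96, depths=[2, 2, 6, 2], out_features=["stage4"])
#   # stage_names becomes ["stem", "stage1", ..., "stage4"] and out_indices is
#   # aligned to the requested out_features by get_aligned_output_features_output_indices.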
| 162
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
_lowercase : Optional[Any] = DDIMPipeline
_lowercase : Optional[Any] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
_lowercase : Any = PipelineTesterMixin.required_optional_params - {
'''num_images_per_prompt''',
'''latents''',
'''callback''',
'''callback_steps''',
}
_lowercase : Tuple = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
_lowercase : Tuple = False
    def get_dummy_components( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        unet = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
        scheduler = DDIMScheduler()
        components = {"""unet""": unet, """scheduler""": scheduler}
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"""batch_size""": 1,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = self.pipeline_class(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
_lowerCAmelCase = self.get_dummy_inputs(_lowercase )
_lowerCAmelCase = pipe(**_lowercase ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
_lowerCAmelCase = np.array(
[1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
_lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowercase , 1e-3 )
def _lowercase ( self ):
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def _lowercase ( self ):
"""simple docstring"""
super().test_save_load_local(expected_max_difference=3e-3 )
def _lowercase ( self ):
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def _lowercase ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = """google/ddpm-cifar10-32"""
_lowerCAmelCase = UNetaDModel.from_pretrained(_lowercase )
_lowerCAmelCase = DDIMScheduler()
_lowerCAmelCase = DDIMPipeline(unet=_lowercase , scheduler=_lowercase )
ddim.to(_lowercase )
ddim.set_progress_bar_config(disable=_lowercase )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = ddim(generator=_lowercase , eta=0.0 , output_type="""numpy""" ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowerCAmelCase = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = """google/ddpm-ema-bedroom-256"""
_lowerCAmelCase = UNetaDModel.from_pretrained(_lowercase )
_lowerCAmelCase = DDIMScheduler.from_pretrained(_lowercase )
_lowerCAmelCase = DDIMPipeline(unet=_lowercase , scheduler=_lowercase )
ddpm.to(_lowercase )
ddpm.set_progress_bar_config(disable=_lowercase )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = ddpm(generator=_lowercase , output_type="""numpy""" ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_lowerCAmelCase = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 162
| 1
|
def base16_encode(data: bytes ) -> str:
    '''simple docstring'''
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def base16_decode(data: str ) -> bytes:
    '''simple docstring'''
    if (len(data ) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits.""" )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set("""0123456789ABCDEF""" ):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.""" )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
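# Round-trip sketch (hand-checked against the functions above):
#   base16_encode(b"Hello")      -> "48656C6C6F"
#   base16_decode("48656C6C6F")  -> b"Hello"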
| 165
|
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(coefficient_matrix , constant_matrix , init_val , iterations , ) -> list[float]:
    rowsa , colsa = coefficient_matrix.shape
    rowsb , colsb = constant_matrix.shape
    if rowsa != colsa:
        msg = F'''Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'''
        raise ValueError(msg )
    if colsb != 1:
        msg = F'''Constant matrix must be nx1 but received {rowsb}x{colsb}'''
        raise ValueError(msg )
    if rowsa != rowsb:
        msg = (
            """Coefficient and constant matrices dimensions must be nxn and nx1 but """
            F'''received {rowsa}x{colsa} and {rowsb}x{colsb}'''
        )
        raise ValueError(msg )
    if len(init_val ) != rowsa:
        msg = (
            """Number of initial values must be equal to number of rows in coefficient """
            F'''matrix but received {len(init_val )} and {rowsa}'''
        )
        raise ValueError(msg )
    if iterations <= 0:
        raise ValueError("""Iterations must be at least 1""" )
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows , cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom )
        init_val = new_val
    return [float(i ) for i in new_val]
def strictly_diagonally_dominant(table ) -> bool:
    rows , cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 , rows ):
        total = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("""Coefficient matrix is not strictly diagonally dominant""" )
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
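    # Hedged usage sketch for the solver above (a strictly diagonally dominant
    # 3x3 system; the numbers are illustrative only):
    coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]])
    constant = np.array([[2], [-6], [-4]])
    print(jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], 3))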
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1_024,
    "facebook/mbart-large-cc25": 1_024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
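# Hedged usage sketch (checkpoint name taken from the map above; requires network access):
#
#     tokenizer = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX")
#     batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#     ro_id = tokenizer.lang_code_to_id["ro_RO"]  # used to force the first generated token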
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
__UpperCAmelCase : List[str] = tau * frequency / samplerate
__UpperCAmelCase : Optional[int] = sin(UpperCamelCase )
__UpperCAmelCase : Any = cos(UpperCamelCase )
__UpperCAmelCase : Tuple = _sin / (2 * q_factor)
__UpperCAmelCase : Optional[Any] = (1 - _cos) / 2
__UpperCAmelCase : Any = 1 - _cos
__UpperCAmelCase : List[str] = 1 + alpha
__UpperCAmelCase : List[Any] = -2 * _cos
__UpperCAmelCase : Dict = 1 - alpha
__UpperCAmelCase : str = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
__UpperCAmelCase : int = tau * frequency / samplerate
__UpperCAmelCase : Optional[Any] = sin(UpperCamelCase )
__UpperCAmelCase : Dict = cos(UpperCamelCase )
__UpperCAmelCase : Tuple = _sin / (2 * q_factor)
__UpperCAmelCase : Dict = (1 + _cos) / 2
__UpperCAmelCase : Tuple = -1 - _cos
__UpperCAmelCase : Any = 1 + alpha
__UpperCAmelCase : int = -2 * _cos
__UpperCAmelCase : int = 1 - alpha
__UpperCAmelCase : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
__UpperCAmelCase : Tuple = tau * frequency / samplerate
__UpperCAmelCase : Tuple = sin(UpperCamelCase )
__UpperCAmelCase : List[str] = cos(UpperCamelCase )
__UpperCAmelCase : List[Any] = _sin / (2 * q_factor)
__UpperCAmelCase : Any = _sin / 2
__UpperCAmelCase : Optional[int] = 0
__UpperCAmelCase : int = -ba
__UpperCAmelCase : Dict = 1 + alpha
__UpperCAmelCase : List[str] = -2 * _cos
__UpperCAmelCase : int = 1 - alpha
__UpperCAmelCase : int = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase = 1 / sqrt(2 ) ) -> IIRFilter:
"""simple docstring"""
__UpperCAmelCase : str = tau * frequency / samplerate
__UpperCAmelCase : List[Any] = sin(UpperCamelCase )
__UpperCAmelCase : Any = cos(UpperCamelCase )
__UpperCAmelCase : List[str] = _sin / (2 * q_factor)
__UpperCAmelCase : Optional[Any] = 1 - alpha
__UpperCAmelCase : Tuple = -2 * _cos
__UpperCAmelCase : List[Any] = 1 + alpha
__UpperCAmelCase : str = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = 1 / sqrt(2 ) , ) -> IIRFilter:
"""simple docstring"""
__UpperCAmelCase : Dict = tau * frequency / samplerate
__UpperCAmelCase : Optional[int] = sin(UpperCamelCase )
__UpperCAmelCase : Optional[Any] = cos(UpperCamelCase )
__UpperCAmelCase : List[Any] = _sin / (2 * q_factor)
__UpperCAmelCase : List[Any] = 10 ** (gain_db / 40)
__UpperCAmelCase : List[str] = 1 + alpha * big_a
__UpperCAmelCase : Optional[Any] = -2 * _cos
__UpperCAmelCase : int = 1 - alpha * big_a
__UpperCAmelCase : Optional[int] = 1 + alpha / big_a
__UpperCAmelCase : Union[str, Any] = -2 * _cos
__UpperCAmelCase : Dict = 1 - alpha / big_a
__UpperCAmelCase : Dict = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = 1 / sqrt(2 ) , ) -> IIRFilter:
"""simple docstring"""
__UpperCAmelCase : str = tau * frequency / samplerate
__UpperCAmelCase : int = sin(UpperCamelCase )
__UpperCAmelCase : Optional[Any] = cos(UpperCamelCase )
__UpperCAmelCase : str = _sin / (2 * q_factor)
__UpperCAmelCase : str = 10 ** (gain_db / 40)
__UpperCAmelCase : Union[str, Any] = (big_a + 1) - (big_a - 1) * _cos
__UpperCAmelCase : Optional[int] = (big_a + 1) + (big_a - 1) * _cos
__UpperCAmelCase : List[str] = (big_a - 1) - (big_a + 1) * _cos
__UpperCAmelCase : List[Any] = (big_a - 1) + (big_a + 1) * _cos
__UpperCAmelCase : Optional[Any] = 2 * sqrt(UpperCamelCase ) * alpha
__UpperCAmelCase : Tuple = big_a * (pmc + aaa)
__UpperCAmelCase : Union[str, Any] = 2 * big_a * mpc
__UpperCAmelCase : Optional[int] = big_a * (pmc - aaa)
__UpperCAmelCase : Any = ppmc + aaa
__UpperCAmelCase : Dict = -2 * pmpc
__UpperCAmelCase : Any = ppmc - aaa
__UpperCAmelCase : str = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = 1 / sqrt(2 ) , ) -> IIRFilter:
"""simple docstring"""
__UpperCAmelCase : Dict = tau * frequency / samplerate
__UpperCAmelCase : Optional[int] = sin(UpperCamelCase )
__UpperCAmelCase : Dict = cos(UpperCamelCase )
__UpperCAmelCase : str = _sin / (2 * q_factor)
__UpperCAmelCase : int = 10 ** (gain_db / 40)
__UpperCAmelCase : Optional[Any] = (big_a + 1) - (big_a - 1) * _cos
__UpperCAmelCase : Optional[int] = (big_a + 1) + (big_a - 1) * _cos
__UpperCAmelCase : Union[str, Any] = (big_a - 1) - (big_a + 1) * _cos
__UpperCAmelCase : Tuple = (big_a - 1) + (big_a + 1) * _cos
__UpperCAmelCase : Optional[Any] = 2 * sqrt(UpperCamelCase ) * alpha
__UpperCAmelCase : Tuple = big_a * (ppmc + aaa)
__UpperCAmelCase : Any = -2 * big_a * pmpc
__UpperCAmelCase : int = big_a * (ppmc - aaa)
__UpperCAmelCase : int = pmc + aaa
__UpperCAmelCase : Tuple = 2 * mpc
__UpperCAmelCase : Union[str, Any] = pmc - aaa
__UpperCAmelCase : Any = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
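# Hedged usage sketch (assumes IIRFilter.process(sample) from audio_filters.iir_filter;
# `samples` is an illustrative iterable of floats):
#
#     lowpass = make_lowpass(1_000, 48_000)
#     filtered = [lowpass.process(sample) for sample in samples]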
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : Any = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') )
for source_index, target_index in zip(range(3, 0, -1 ), range(0, 3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') )
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') )
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') )
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') )
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65_535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
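# Hedged CLI sketch (the script filename and paths are illustrative placeholders):
#
#     python convert_maskformer_checkpoint.py \
#         --model_name maskformer-swin-tiny-ade \
#         --checkpoint_path /path/to/model.pkl \
#         --pytorch_dump_folder_path /path/to/output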
if __name__ == "__main__":
_snake_case : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
        help="Name of the MaskFormer model you'd like to convert",
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_snake_case : List[str] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
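# Hedged usage sketch (the tool instance is callable; "photo.jpg" is an illustrative path):
#
#     from PIL import Image
#     tool = ImageCaptioningTool()
#     caption = tool(Image.open("photo.jpg"))  # PIL image in, English caption string out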
"""simple docstring"""
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1_001

    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1_001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
UpperCAmelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='mobilenet_v1_1.0_224',
type=str,
help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCAmelCase__ : Dict = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
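# Hedged usage sketch: with the lazy module above, importing the package stays cheap and
# heavy submodules load only on first attribute access, e.g.:
#
#     from transformers.models.mask2former import Mask2FormerConfig
#     config = Mask2FormerConfig()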
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("""Agg""")
import matplotlib.pyplot as plt
main()
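# Hedged CLI sketch (script and file names are illustrative placeholders):
#
#     python evaluate_squad_v2.py dev-v2.0.json predictions.json \
#         --na-prob-file na_probs.json --out-file eval.json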
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
"""simple docstring"""
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """
    Find all the valid positions a knight can move to from the current position.

    >>> get_valid_pos((1, 3), 4)
    [(2, 1), (0, 1), (3, 2)]
    """

    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check if the board (matrix) has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Helper function to solve the knight tour problem by backtracking."""

    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """
    Find a solution for the knight tour problem on a board of size n. Raises
    ValueError if the tour cannot be performed for the given size.
    """

    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
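# Usage sketch (an illustration, not part of the original module): the returned
# board stores the visit order of one open tour, starting at 1. The backtracking
# search is exponential in the worst case, so keep n small when experimenting;
# sizes 2-4 should hit the ValueError above since no open tour exists there.
#
#   board = open_knight_tour(5)
#   for row in board:
#       print(row)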
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""
    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
'''sep''': self.sep,
'''header''': self.header,
'''names''': self.names,
'''index_col''': self.index_col,
'''usecols''': self.usecols,
'''prefix''': self.prefix,
'''mangle_dupe_cols''': self.mangle_dupe_cols,
'''engine''': self.engine,
'''converters''': self.converters,
'''true_values''': self.true_values,
'''false_values''': self.false_values,
'''skipinitialspace''': self.skipinitialspace,
'''skiprows''': self.skiprows,
'''nrows''': self.nrows,
'''na_values''': self.na_values,
'''keep_default_na''': self.keep_default_na,
'''na_filter''': self.na_filter,
'''verbose''': self.verbose,
'''skip_blank_lines''': self.skip_blank_lines,
'''thousands''': self.thousands,
'''decimal''': self.decimal,
'''lineterminator''': self.lineterminator,
'''quotechar''': self.quotechar,
'''quoting''': self.quoting,
'''escapechar''': self.escapechar,
'''comment''': self.comment,
'''encoding''': self.encoding,
'''dialect''': self.dialect,
'''error_bad_lines''': self.error_bad_lines,
'''warn_bad_lines''': self.warn_bad_lines,
'''skipfooter''': self.skipfooter,
'''doublequote''': self.doublequote,
'''memory_map''': self.memory_map,
'''float_precision''': self.float_precision,
'''chunksize''': self.chunksize,
'''encoding_errors''': self.encoding_errors,
'''on_bad_lines''': self.on_bad_lines,
'''date_format''': self.date_format,
}
        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
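# Example of the pruning above (a sketch): only parameters from the
# no-default/deprecated lists that still equal their CsvConfig default are
# dropped, so pandas falls back to its own defaults for them; everything else,
# e.g. a non-default `sep`, is forwarded to `pd.read_csv` unchanged.
#
#   CsvConfig(sep=";").pd_read_csv_kwargs["sep"]   # ";"
#   "names" in CsvConfig().pd_read_csv_kwargs      # False: pruned while at default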
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")
    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
if self.max_recycles <= 0:
raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
if self.sequence_state_dim % self.sequence_state_dim != 0:
raise ValueError(
"`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
f""" {self.sequence_state_dim} and {self.sequence_state_dim}.""" )
if self.pairwise_state_dim % self.pairwise_state_dim != 0:
raise ValueError(
"`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
f""" {self.pairwise_state_dim} and {self.pairwise_state_dim}.""" )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
import os
def solution():
    """
    Finds the maximum total in a triangle as described by the problem statement
    above.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
print(solution())
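# A worked miniature of the same dynamic programme (a hypothetical 3-row
# triangle, not the puzzle's triangle.txt): after the in-place accumulation each
# cell holds the best path sum reaching it, and the answer is the bottom-row max.
#
#   3            3
#   7 4    ->    10  7
#   2 4 6        12  14  13    -> max is 14, via the path 3 -> 7 -> 4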
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
@slow
def __lowercase( self : Any )-> str:
"""simple docstring"""
# fmt: off
SCREAMING_SNAKE_CASE__ : Optional[int] = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer(self):
        test_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(test_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
"""simple docstring"""
import torch
from diffusers import DiffusionPipeline
class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
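# Minimal usage sketch (the tiny UNet config below is hypothetical and exists
# only for illustration). The call returns a tensor of ones with the sample's
# shape, because `scheduler_output - scheduler_output` cancels to zero before
# `torch.ones_like` is added.
#
#   from diffusers import DDPMScheduler, UNet2DModel
#   unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
#   scheduler = DDPMScheduler(num_train_timesteps=1000)
#   pipeline = CustomLocalPipeline(unet=unet, scheduler=scheduler)
#   assert bool((pipeline() == 1.0).all())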
"""simple docstring"""
def solution(n: int = 10) -> str:
    """
    Returns the last n digits of 28433 * 2**7830457 + 1.
    """
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
    print(f"{solution(10) = }")
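# Why the three-argument pow above: pow(2, 7830457, modulus) does modular
# exponentiation by repeated squaring in O(log exponent) multiplications,
# without ever materialising the ~2.4-million-digit power. A tiny check of
# the same identity:
#
#   assert pow(2, 10, 1000) == (2**10) % 1000 == 24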
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"

        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)

            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")

            s_dict.pop(key)

    return s_dict
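# Toy walk-through of rename_keys (hypothetical checkpoint key): the layer regex
# first rewrites "encoder/layers_0/attention/key/kernel" to
# "encoder/block/0/layer/attention/key/kernel"; the mapping pass then substitutes
# "/attention/" -> "/0/SelfAttention/" and "key" -> "k", yielding
# "encoder/block/0/layer/0/SelfAttention/k/kernel".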
GIN_TO_CONFIG_MAPPING = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a Google-style config to the Hugging Face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    # Initialise PyTorch model

    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
'''simple docstring'''
import base64


def base85_encode(string: str) -> bytes:
    # encode the input string to bytes, then Base85-encode it
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(encoded: bytes) -> str:
    # decode the Base85 bytes, then decode back to a UTF-8 string
    return base64.b85decode(encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)

    decoded = base85_decode(encoded)
    print(decoded)
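# Round-trip sanity check (a sketch): b85encode implements the Base85 variant
# used by git-style binary diffs, emitting 5 ASCII characters per 4 input bytes.
#
#   assert base85_decode(base85_encode("some text")) == "some text"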
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
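# Note on the lazy pattern above: `import transformers.onnx` stays cheap because
# _LazyModule only records _import_structure; attribute access such as
# `transformers.onnx.OnnxConfig` triggers the real import of `.config` on first use.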
'''simple docstring'''
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False
    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/deit-base-distilled-patch16-224""": (
"""https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"""
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
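# Illustrative usage (hedged sketch, not library code): instantiate the model config
# and inspect what this ONNX export description declares.
#
#     config = DeiTConfig()
#     onnx_config = DeiTOnnxConfig(config)
#     dict(onnx_config.inputs)
#     # {'pixel_values': {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}}
#     onnx_config.atol_for_validation  # 1e-4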
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", f"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", f"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", f"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", f"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.weight""", f"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", f"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", f"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", f"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.weight""", f"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", f"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", f"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", f"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", f"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", f"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.bias""", f"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", f"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", f"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", f"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.bias""", f"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", f"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
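# Hedged mini-example of the two helpers above on a toy dict (keys illustrative):
#
#     sd = {"backbone.0.body.conv1.weight": 0, "input_proj.weight": 1}
#     rename_key(sd, "input_proj.weight", "input_projection.weight")
#     sd = rename_backbone_keys(sd)
#     # {'backbone.conv_encoder.model.conv1.weight': 0, 'input_projection.weight': 1}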
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
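# Sanity sketch (hypothetical tensor sizes): PyTorch's fused `in_proj_weight` stacks
# the query, key and value projections along dim 0, so a (768, 256) matrix splits
# into three (256, 256) blocks exactly as sliced above:
#
#     w = torch.randn(768, 256)
#     q, k, v = w[:256, :], w[256:512, :], w[-256:, :]
#     assert torch.equal(torch.cat([q, k, v], dim=0), w)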
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def test_custom_files_are_present(transformers_path):
    # Test all the extensions added in the setup
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
if not test_custom_files_are_present(transformers_path):
raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
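    # Typical invocations (illustrative; the script path is an assumption):
    #   python utils/check_build.py              # check the built release in build/lib
    #   python utils/check_build.py --check_lib  # check the installed package instead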
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out


class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)


class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:  # FiLM conditioning
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states


class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output


class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states


class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)

        hidden_states = self.wo(hidden_states)
        return hidden_states


class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5-style layer norm: scale only, no mean subtraction and no bias.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states


class NewGELUActivation(nn.Module):
    def forward(self, input):
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))


class T5FiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
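# FiLM in one line (hedged, shapes illustrative): the conditioning embedding is
# projected to per-channel (scale, shift) pairs and applied as x * (1 + scale) + shift.
#
#     film = T5FiLMLayer(in_features=3072, out_features=768)
#     x = torch.randn(2, 16, 768)     # (batch, seq, d_model)
#     cond = torch.randn(2, 1, 3072)  # broadcasts over the sequence dimension
#     film(x, cond).shape             # torch.Size([2, 16, 768])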
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
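    # The tests above all follow the canonical sampling loop. A hedged minimal sketch
    # of the same loop outside the test harness (the zero "model" is a stand-in):
    #
    #     scheduler = EulerDiscreteScheduler(num_train_timesteps=1000)
    #     scheduler.set_timesteps(10)
    #     sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
    #     for t in scheduler.timesteps:
    #         model_input = scheduler.scale_model_input(sample, t)
    #         noise_pred = torch.zeros_like(model_input)  # stand-in for a UNet
    #         sample = scheduler.step(noise_pred, t, sample).prev_sample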
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )

            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
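# Hedged usage sketch (the checkpoint id is illustrative): audio goes through the
# feature extractor and transcripts through the tokenizer, via one processor object.
#
#     processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#     inputs = processor(audio=raw_audio, sampling_rate=16000, return_tensors="pt")
#     labels = processor(text="HELLO WORLD", return_tensors="pt").input_ids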
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
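# Why the `f.readline()` fix-up above matters (hedged standalone sketch): a chunk
# boundary can cut a JSON record in half ("straddling"), which pyarrow rejects;
# extending the batch to the next newline restores valid JSON Lines.
#
#     import io
#     f = io.BytesIO(b'{"a": 1}\n{"a": 2}\n{"a": 3}\n')
#     batch = f.read(12)     # ends mid-way through the second record
#     batch += f.readline()  # now ends on a record boundary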
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}
_UpperCamelCase : List[str] = """▁"""
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
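# Layout produced by the two methods above (illustration):
#   single sequence: [CLS] A [SEP]          token_type_ids: 0 0 ... 0
#   sequence pair:   [CLS] A [SEP] B [SEP]  token_type_ids: 0 ... 0 1 ... 1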
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
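# _LazyModule defers the heavy torch imports until an attribute is first accessed,
# so importing this package stays cheap. Hedged sketch of the mechanism (simplified,
# not the actual implementation):
#
#     class _Lazy(types.ModuleType):
#         def __getattr__(self, name):
#             submodule = importlib.import_module(".modeling_autoformer", __package__)
#             return getattr(submodule, name)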
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate entropy of a pre-softmax logit Tensor"""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
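# Derivation of the closed form above: with p_i = exp(x_i) / A and A = sum_j exp(x_j),
# H(p) = -sum_i p_i * log(p_i) = -sum_i p_i * (x_i - log A) = log(A) - B / A,
# where B = sum_i x_i * exp(x_i). Sanity check: x = [0, 0] gives log(2) - 0 ≈ 0.6931,
# the entropy of a uniform distribution over two classes.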
class DeeBertEncoder(nn.Module):
    # Note: BertHighway and HighwayException are defined later in this module.
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)

        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
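# Early exit is implemented as control flow: when a highway head's entropy drops
# below its layer threshold, a HighwayException carrying the assembled outputs
# unwinds the stack and the caller returns early. Hedged sketch of the catching
# side (mirrors the DeeBERT pattern; attribute names assumed):
#
#     try:
#         outputs = self.encoder(embedding_output, ...)
#     except HighwayException as e:
#         outputs = e.message       # outputs built at the exit layer
#         exit_layer = e.exit_layer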
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,  # docstring constant defined earlier in the original module
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prunes heads of the model.
        heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]

        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[1:]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A module to provide a shortcut from (the output of one non-final BertLayer in BertEncoder) to
    (cross-entropy computation in BertForSequenceClassification)."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]

    return largest_product(grid)
if __name__ == "__main__":
print(solution())
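
# Quick illustrative check with a hand-made 4x4 grid (not the Project Euler
# input): the best run of four adjacent numbers is the left-to-right diagonal
# 1 * 2 * 3 * 4 = 24.
#
#     >>> largest_product([[1, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]])
#     24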
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """
    Copy/paste/tweak prophetnet's weights to our prophetnet structure.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(old_attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
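
# Example invocation, assuming this script is saved as
# convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py (the checkpoint
# name is illustrative and requires the legacy `transformers_old` branch noted
# in the imports above):
#
#     python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#         --prophetnet_checkpoint_path patrickvonplaten/prophetnet-large-uncased_old \
#         --pytorch_dump_folder_path ./prophetnet-large-uncased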
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
_lowerCamelCase ="""Create a default config file for Accelerate with only a few flags set."""
def _a ( lowerCamelCase="no", lowerCamelCase = default_json_config_file, lowerCamelCase = False ):
lowerCamelCase : List[Any] = Path(lowerCamelCase )
path.parent.mkdir(parents=lowerCamelCase, exist_ok=lowerCamelCase )
if path.exists():
print(
F'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' )
return False
lowerCamelCase : Union[str, Any] = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' )
lowerCamelCase : Union[str, Any] = {
"""compute_environment""": """LOCAL_MACHINE""",
"""mixed_precision""": mixed_precision,
}
if torch.cuda.is_available():
lowerCamelCase : Any = torch.cuda.device_count()
lowerCamelCase : Optional[Any] = num_gpus
lowerCamelCase : Optional[int] = False
if num_gpus > 1:
lowerCamelCase : Any = """MULTI_GPU"""
else:
lowerCamelCase : Optional[int] = """NO"""
elif is_xpu_available() and use_xpu:
lowerCamelCase : str = torch.xpu.device_count()
lowerCamelCase : Tuple = num_xpus
lowerCamelCase : List[str] = False
if num_xpus > 1:
lowerCamelCase : str = """MULTI_XPU"""
else:
lowerCamelCase : List[Any] = """NO"""
elif is_npu_available():
lowerCamelCase : List[str] = torch.npu.device_count()
lowerCamelCase : Tuple = num_npus
lowerCamelCase : List[Any] = False
if num_npus > 1:
lowerCamelCase : Union[str, Any] = """MULTI_NPU"""
else:
lowerCamelCase : Any = """NO"""
else:
lowerCamelCase : int = 0
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : List[Any] = 1
lowerCamelCase : int = """NO"""
lowerCamelCase : Optional[int] = ClusterConfig(**lowerCamelCase )
config.to_json_file(lowerCamelCase )
return path
def _a ( lowerCamelCase, lowerCamelCase ):
lowerCamelCase : List[Any] = parser.add_parser("""default""", parents=lowerCamelCase, help=lowerCamelCase, formatter_class=lowerCamelCase )
parser.add_argument(
"""--config_file""", default=lowerCamelCase, help=(
"""The path to use to store the config file. Will default to a file named default_config.yaml in the cache """
"""location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """
"""such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """
"""with 'huggingface'."""
), dest="""save_location""", )
parser.add_argument(
"""--mixed_precision""", choices=["""no""", """fp16""", """bf16"""], type=lowerCamelCase, help="""Whether or not to use mixed precision training. """
"""Choose between FP16 and BF16 (bfloat16) training. """
"""BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.""", default="""no""", )
parser.set_defaults(func=lowerCamelCase )
return parser
def _a ( lowerCamelCase ):
lowerCamelCase : Tuple = write_basic_config(args.mixed_precision, args.save_location )
if config_file:
print(F'''accelerate configuration saved at {config_file}''' )
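
# Usage sketch: `write_basic_config` can also be called directly, e.g. from a
# setup script (the save path here is arbitrary):
#
#     path = write_basic_config(mixed_precision="fp16", save_location="/tmp/accelerate_default.json")
#     if path:
#         print(f"wrote default config to {path}")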
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
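
# Minimal usage sketch (the values are the gym-hopper-like defaults above and
# are illustrative, not tuned):
#
#     config = DecisionTransformerConfig(state_dim=17, act_dim=4, hidden_size=128)
#     assert config.max_position_embeddings == config.n_positions  # resolved via attribute_map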
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowercase__ : Optional[int] = 1_6
lowercase__ : Tuple = 3_2
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
'''simple docstring'''
__UpperCamelCase = AutoTokenizer.from_pretrained(snake_case )
__UpperCamelCase = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(snake_case : Dict ):
# max_length=None => use the model max length (it's actually the default)
__UpperCamelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case , max_length=snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__UpperCamelCase = datasets.map(
snake_case , batched=snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=snake_case )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__UpperCamelCase = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(snake_case : Tuple ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return tokenizer.pad(snake_case , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
__UpperCamelCase = DataLoader(
tokenized_datasets['''train'''] , shuffle=snake_case , collate_fn=snake_case , batch_size=snake_case )
__UpperCamelCase = DataLoader(
tokenized_datasets['''validation'''] , shuffle=snake_case , collate_fn=snake_case , batch_size=snake_case )
return train_dataloader, eval_dataloader
def training_function(config, args):
'''simple docstring'''
__UpperCamelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__UpperCamelCase = config['''lr''']
__UpperCamelCase = int(config['''num_epochs'''] )
__UpperCamelCase = int(config['''seed'''] )
__UpperCamelCase = int(config['''batch_size'''] )
__UpperCamelCase = args.model_name_or_path
set_seed(snake_case )
__UpperCamelCase , __UpperCamelCase = get_dataloaders(snake_case , snake_case , snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__UpperCamelCase = AutoModelForSequenceClassification.from_pretrained(snake_case , return_dict=snake_case )
# Instantiate optimizer
__UpperCamelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__UpperCamelCase = optimizer_cls(params=model.parameters() , lr=snake_case )
if accelerator.state.deepspeed_plugin is not None:
__UpperCamelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
__UpperCamelCase = 1
__UpperCamelCase = (len(snake_case ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__UpperCamelCase = get_linear_schedule_with_warmup(
optimizer=snake_case , num_warmup_steps=0 , num_training_steps=snake_case , )
else:
__UpperCamelCase = DummyScheduler(snake_case , total_num_steps=snake_case , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
snake_case , snake_case , snake_case , snake_case , snake_case )
# We need to keep track of how many total steps we have iterated over
__UpperCamelCase = 0
# We also need to keep track of the stating epoch so files are named properly
__UpperCamelCase = 0
# Now we train the model
__UpperCamelCase = evaluate.load('''glue''' , '''mrpc''' )
__UpperCamelCase = 0
__UpperCamelCase = {}
for epoch in range(snake_case , snake_case ):
model.train()
for step, batch in enumerate(snake_case ):
__UpperCamelCase = model(**snake_case )
__UpperCamelCase = outputs.loss
__UpperCamelCase = loss / gradient_accumulation_steps
accelerator.backward(snake_case )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
__UpperCamelCase = 0
for step, batch in enumerate(snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__UpperCamelCase = model(**snake_case )
__UpperCamelCase = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__UpperCamelCase , __UpperCamelCase = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(snake_case ) - 1:
__UpperCamelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__UpperCamelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=snake_case , references=snake_case , )
__UpperCamelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:" , snake_case )
__UpperCamelCase = eval_metric['''accuracy''']
if best_performance < eval_metric["accuracy"]:
__UpperCamelCase = eval_metric['''accuracy''']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , '''all_results.json''' ) , '''w''' ) as f:
json.dump(snake_case , snake_case )
def main():
'''simple docstring'''
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
parser.add_argument(
'''--model_name_or_path''' , type=snake_case , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=snake_case , )
parser.add_argument(
'''--output_dir''' , type=snake_case , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--performance_lower_bound''' , type=snake_case , default=snake_case , help='''Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.''' , )
parser.add_argument(
'''--num_epochs''' , type=snake_case , default=3 , help='''Number of train epochs.''' , )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
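
# Typical launch (flags and config file name are illustrative): run under
# `accelerate launch` so that any DeepSpeed plugin configured via
# `accelerate config` is picked up:
#
#     accelerate launch --config_file ds_zero2.yaml this_script.py \
#         --model_name_or_path bert-base-cased --num_epochs 3 --output_dir ./out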
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    """
    Zero shot image classification pipeline using `CLIPModel`. This pipeline predicts the class of an image when
    you provide an image and a set of `candidate_labels`.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
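
# Usage sketch via the high-level pipeline API (the image URL and candidate
# labels are illustrative):
#
#     from transformers import pipeline
#
#     classifier = pipeline("zero-shot-image-classification")
#     classifier(
#         "http://images.cocodataset.org/val2017/000000039769.jpg",
#         candidate_labels=["a photo of a cat", "a photo of a dog"],
#     )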
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
@require_torch
def _A ( self: Optional[Any] ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
_a = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(__UpperCamelCase )
BertModel.from_pretrained(__UpperCamelCase )
BertTokenizer.from_pretrained(__UpperCamelCase )
pipeline(task='''fill-mask''' , model=__UpperCamelCase )
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
_a = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a = '''1'''
_a = subprocess.run(__UpperCamelCase , env=__UpperCamelCase , check=__UpperCamelCase , capture_output=__UpperCamelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def _A ( self: str ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
_a = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(__UpperCamelCase )
BertModel.from_pretrained(__UpperCamelCase )
BertTokenizer.from_pretrained(__UpperCamelCase )
pipeline(task='''fill-mask''' , model=__UpperCamelCase )
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
_a = self.get_env()
_a = subprocess.run(__UpperCamelCase , env=__UpperCamelCase , check=__UpperCamelCase , capture_output=__UpperCamelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def _A ( self: str ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a = '''
from transformers import BertConfig, BertModel, BertTokenizer
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
_a = self.get_env()
_a = subprocess.run(__UpperCamelCase , env=__UpperCamelCase , check=__UpperCamelCase , capture_output=__UpperCamelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# next emulate no network
_a = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a = '''1'''
_a = subprocess.run(__UpperCamelCase , env=__UpperCamelCase , check=__UpperCamelCase , capture_output=__UpperCamelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def _A ( self: Union[str, Any] ):
_a = '''
from transformers import pipeline
'''
_a = '''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
_a = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
_a = self.get_env()
_a = '''1'''
_a = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
_a = subprocess.run(__UpperCamelCase , env=__UpperCamelCase , check=__UpperCamelCase , capture_output=__UpperCamelCase )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
@require_torch
def _A ( self: int ):
_a = '''
from transformers import AutoModel
'''
_a = '''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
_a = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
_a = self.get_env()
_a = subprocess.run(__UpperCamelCase , env=__UpperCamelCase , check=__UpperCamelCase , capture_output=__UpperCamelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a = '''1'''
_a = subprocess.run(__UpperCamelCase , env=__UpperCamelCase , check=__UpperCamelCase , capture_output=__UpperCamelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config
def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

    if name == "encoder.norm.weight":
        name = "encoder.layernorm.weight"
    if name == "encoder.norm.bias":
        name = "encoder.layernorm.bias"

    return name
def convert_state_dict(orig_state_dict, model):
for key in orig_state_dict.copy().keys():
_a = orig_state_dict.pop(_UpperCamelCase )
if "qkv" in key:
_a = key.split('''.''' )
_a = int(key_split[3] )
_a = int(key_split[5] )
_a = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_a = val[:dim, :]
_a = val[dim : dim * 2, :]
_a = val[-dim:, :]
else:
_a = val[:dim]
_a = val[dim : dim * 2]
_a = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
_a = val
return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
# load original model
_a = DonutModel.from_pretrained(_UpperCamelCase ).eval()
# load HuggingFace model
_a , _a = get_configs(_UpperCamelCase )
_a = DonutSwinModel(_UpperCamelCase )
_a = MBartForCausalLM(_UpperCamelCase )
_a = VisionEncoderDecoderModel(encoder=_UpperCamelCase , decoder=_UpperCamelCase )
model.eval()
_a = original_model.state_dict()
_a = convert_state_dict(_UpperCamelCase , _UpperCamelCase )
model.load_state_dict(_UpperCamelCase )
# verify results on scanned document
_a = load_dataset('''hf-internal-testing/example-documents''' )
_a = dataset['''test'''][0]['''image'''].convert('''RGB''' )
_a = XLMRobertaTokenizerFast.from_pretrained(_UpperCamelCase , from_slow=_UpperCamelCase )
_a = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
_a = DonutProcessor(_UpperCamelCase , _UpperCamelCase )
_a = processor(_UpperCamelCase , return_tensors='''pt''' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
_a = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
_a = '''When is the coffee break?'''
_a = task_prompt.replace('''{user_input}''' , _UpperCamelCase )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
_a = '''<s_rvlcdip>'''
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
_a = '''<s_cord>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
_a = '''s_cord-v2>'''
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
_a = '''<s_zhtrainticket>'''
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
_a = '''hello world'''
else:
raise ValueError('''Model name not supported''' )
_a = original_model.decoder.tokenizer(_UpperCamelCase , add_special_tokens=_UpperCamelCase , return_tensors='''pt''' )[
'''input_ids'''
]
_a = original_model.encoder.model.patch_embed(_UpperCamelCase )
_a , _a = model.encoder.embeddings(_UpperCamelCase )
assert torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 )
# verify encoder hidden states
_a = original_model.encoder(_UpperCamelCase )
_a = model.encoder(_UpperCamelCase ).last_hidden_state
assert torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-2 )
# verify decoder hidden states
_a = original_model(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ).logits
_a = model(_UpperCamelCase , decoder_input_ids=_UpperCamelCase ).logits
assert torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model and processor to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCamelCase )
processor.save_pretrained(_UpperCamelCase )
if push_to_hub:
model.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
processor.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
    args = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
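
# Example invocation, assuming this script is saved as convert_donut_to_pytorch.py
# (the model name is one of the supported checkpoints handled above):
#
#     python convert_donut_to_pytorch.py --model_name naver-clova-ix/donut-base \
#         --pytorch_dump_folder_path ./donut-base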
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """Convert a Roman numeral to an integer, handling subtractive pairs like IV and XC."""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert an integer to a Roman numeral by greedily consuming the table above."""
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
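
# Worked examples, consistent with the conversion table above:
#
#     >>> roman_to_int("MMXIV")   # M + M + X + IV = 1000 + 1000 + 10 + 4
#     2014
#     >>> int_to_roman(2014)
#     'MMXIV'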
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise the PyTorch model from the config file
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
from math import ceil
def solution(n: int = 1001) -> int:
    """Sum of the numbers on the diagonals of an n x n number spiral (Project Euler 28)."""
    total = 1

    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1  # (2i + 1)^2 is the top-right corner of ring i
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even

    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
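
# Sanity check: for a 5x5 spiral the diagonal values are 1, 3, 5, 7, 9, 13, 17,
# 21 and 25, which sum to 101, matching the closed-form update above:
#
#     >>> solution(5)
#     101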
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""):
raise Exception("""requires fairseq >= 1.0.0a""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """
    Copy/paste/tweak roberta's weights to our BERT structure.
    """
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()
# Now let's copy all the weights.
# Embeddings
A_ : Optional[int] = roberta_sent_encoder.embed_tokens.weight
A_ : str = roberta_sent_encoder.embed_positions.weight
A_ : Dict = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
A_ : List[str] = roberta_sent_encoder.layer_norm.weight
A_ : Optional[Any] = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
A_ : BertLayer = model.roberta.encoder.layer[i]
A_ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
A_ : RobertaAttention = layer.attention
A_ : Optional[int] = roberta_layer.self_attn_layer_norm.weight
A_ : Optional[int] = roberta_layer.self_attn_layer_norm.bias
# self attention
A_ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
A_ : Optional[int] = roberta_layer.self_attn.q_proj.weight
A_ : Optional[int] = roberta_layer.self_attn.q_proj.bias
A_ : Optional[Any] = roberta_layer.self_attn.k_proj.weight
A_ : Dict = roberta_layer.self_attn.k_proj.bias
A_ : Optional[Any] = roberta_layer.self_attn.v_proj.weight
A_ : Tuple = roberta_layer.self_attn.v_proj.bias
# self-attention output
A_ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
A_ : int = roberta_layer.self_attn.out_proj.weight
A_ : Dict = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
A_ : Optional[Any] = roberta_layer.final_layer_norm.weight
A_ : Optional[Any] = roberta_layer.final_layer_norm.bias
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
# end of layer
if classification_head:
A_ : str = roberta.model.classification_heads['''mnli'''].dense.weight
A_ : List[Any] = roberta.model.classification_heads['''mnli'''].dense.bias
A_ : Union[str, Any] = roberta.model.classification_heads['''mnli'''].out_proj.weight
A_ : List[Any] = roberta.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
A_ : str = roberta.model.encoder.lm_head.dense.weight
A_ : Optional[int] = roberta.model.encoder.lm_head.dense.bias
A_ : List[Any] = roberta.model.encoder.lm_head.layer_norm.weight
A_ : Tuple = roberta.model.encoder.lm_head.layer_norm.bias
A_ : str = roberta.model.encoder.lm_head.weight
A_ : Optional[Any] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
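# Hedged usage sketch (not from the source): assuming this conversion script is
# saved as convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py, a
# typical invocation with placeholder paths would look like:
#
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path /path/to/fairseq/checkpoint \
#       --pytorch_dump_folder_path /path/to/hf/output \
#       --classification_head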
| 152
|
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp( self )->Tuple:
        '''simple docstring'''
        super().setUp()
        vocab_tokens = [
'''<unk>''',
'''<cls>''',
'''<sep>''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_tokenizer( self , **kwargs )->Union[str, Any]:
        '''simple docstring'''
        return FunnelTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs )->str:
        '''simple docstring'''
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer )->List[str]:
        '''simple docstring'''
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
        return input_text, output_text
    def test_full_tokenizer( self )->int:
        '''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(tokens , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )
    def test_token_type_ids( self )->str:
        '''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            inputs = tokenizer('''UNwant\u00E9d,running''' )
            sentence_len = len(inputs['''input_ids'''] ) - 1
            self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len )
            inputs = tokenizer('''UNwant\u00E9d,running''' , '''UNwant\u00E9d,running''' )
            self.assertListEqual(inputs['''token_type_ids'''] , [2] + [0] * sentence_len + [1] * sentence_len )
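# A minimal standalone sketch of the convention exercised by the test above
# (the checkpoint name is an assumption, not taken from the source): Funnel
# marks the <cls> slot with token type id 2, then uses 0s and 1s for the two
# sequences, unlike BERT's all-zero first segment.
from transformers import FunnelTokenizer as _FunnelTokenizer

_tok = _FunnelTokenizer.from_pretrained("funnel-transformer/small")  # assumed checkpoint
_enc = _tok("hello there", "general kenobi")
assert _enc["token_type_ids"][0] == 2  # the <cls> position uses type id 2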
| 152
| 1
|
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest( SchedulerCommonTest ):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (('''num_inference_steps''', 50),)
    def get_scheduler_config( self , **A_ ) -> Any:
        """simple docstring"""
        config = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**A_ )
return config
    def check_over_configs( self , time_step=0 , **config ) -> Union[str, Any]:
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            output = scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained( self ) -> Tuple:
"""simple docstring"""
pass
    def check_over_forward( self , time_step=0 , **forward_kwargs ) -> Tuple:
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_prk(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            output = scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step_plms(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop( self , **config ) -> List[str]:
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.prk_timesteps ):
            residual = model(sample , t )
            sample = scheduler.step_prk(residual , t , sample ).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps ):
            residual = model(sample , t )
            sample = scheduler.step_plms(residual , t , sample ).prev_sample
        return sample
    def test_step_shape( self ) -> List[Any]:
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , '''set_timesteps''' ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , '''set_timesteps''' ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            output_0 = scheduler.step_prk(residual , 0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step_prk(residual , 1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
            output_0 = scheduler.step_plms(residual , 0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step_plms(residual , 1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
    def test_timesteps( self ) -> Union[str, Any]:
        """simple docstring"""
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_steps_offset( self ) -> Optional[Any]:
        """simple docstring"""
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1 )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(10 )
        assert torch.equal(
            scheduler.timesteps , torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
    def test_betas( self ) -> List[str]:
        """simple docstring"""
        for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules( self ) -> List[str]:
        """simple docstring"""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type( self ) -> List[str]:
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_time_indices( self ) -> str:
        """simple docstring"""
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t )
    def test_inference_steps( self ) -> Tuple:
        """simple docstring"""
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=num_inference_steps )
    def test_pow_of_3_inference_steps( self ) -> Optional[int]:
        """simple docstring"""
        num_inference_steps = 27
        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2] ):
                sample = scheduler.step_prk(residual , t , sample ).prev_sample
    def test_inference_plms_no_past_residuals( self ) -> List[str]:
        """simple docstring"""
        with self.assertRaises(ValueError ):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
    def test_full_loop_no_noise( self ) -> List[Any]:
        """simple docstring"""
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 1_98.13_18 ) < 1E-2
        assert abs(result_mean.item() - 0.25_80 ) < 1E-3
    def test_full_loop_with_v_prediction( self ) -> List[str]:
        """simple docstring"""
        sample = self.full_loop(prediction_type='''v_prediction''' )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 67.39_86 ) < 1E-2
        assert abs(result_mean.item() - 0.08_78 ) < 1E-3
    def test_full_loop_with_set_alpha_to_one( self ) -> Optional[int]:
        """simple docstring"""
        sample = self.full_loop(set_alpha_to_one=True , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 2_30.03_99 ) < 1E-2
        assert abs(result_mean.item() - 0.29_95 ) < 1E-3
    def test_full_loop_with_no_set_alpha_to_one( self ) -> Tuple:
        """simple docstring"""
        sample = self.full_loop(set_alpha_to_one=False , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 1_86.94_82 ) < 1E-2
        assert abs(result_mean.item() - 0.24_34 ) < 1E-3
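# A minimal end-to-end sketch of the scheduler under test (the constant
# "model output" and tensor shape below are illustrative stand-ins, not from
# the source). PNDMScheduler.step() dispatches to step_prk() during the
# Runge-Kutta warm-up timesteps and to step_plms() afterwards.
import torch
from diffusers import PNDMScheduler

_scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear")
_scheduler.set_timesteps(num_inference_steps=10)
_sample = torch.randn(1, 3, 8, 8)  # dummy noisy sample
for _t in _scheduler.timesteps:
    _model_output = 0.1 * _sample  # stand-in for a denoising model prediction
    _sample = _scheduler.step(_model_output, _t, _sample).prev_sample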
| 353
|
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main():
    """simple docstring"""
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/diffusers''' )
    open_issues = repo.get_issues(state='''open''' )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
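# A small runnable sketch of the inactivity windows used above (the dates are
# illustrative): the bot posts a stale notice only once an issue is at least
# 30 days old and quiet for more than 23 days, and closes it 7 days after the
# notice if nothing else happens.
from datetime import datetime, timedelta

_now = datetime.utcnow()
_created_at = _now - timedelta(days=45)  # hypothetical creation time
_updated_at = _now - timedelta(days=25)  # hypothetical last activity
assert (_now - _updated_at).days > 23 and (_now - _created_at).days >= 30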
| 353
| 1
|
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
_lowerCAmelCase = """</w>"""
_lowerCAmelCase = """@@ """
def get_pairs( word ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
_lowerCAmelCase = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class Speech2Text2Tokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , bos_token="<s>" , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , do_lower_case=False , merges_file=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , do_lower_case=do_lower_case , **kwargs , )
        self.do_lower_case = do_lower_case
        with open(vocab_file , encoding='utf-8' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(F"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file , encoding='utf-8' ) as merges_handle:
                merges = merges_handle.read().split('\n' )[:-1]
            merges = [tuple(merge.split()[:2] ) for merge in merges]
            self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
            self.cache = {}
@property
    def vocab_size( self ):
'''simple docstring'''
return len(self.decoder )
    def get_vocab( self ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
    def bpe( self , token ):
        '''simple docstring'''
        word = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ' '.join(word )
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES ):
            word = word.replace(BPE_TOKEN_MERGES , '' )
        word = word.replace(' ' , BPE_TOKEN_VOCAB )
        self.cache[token] = word
        return word
    def _tokenize( self , text ):
        '''simple docstring'''
        if self.bpe_ranks is None:
            raise ValueError(
                'This tokenizer was instantiated without a `merges.txt` file, so'
                ' that it can only be used for decoding, not for encoding. '
                'Make sure to provide a `merges.txt` file at instantiation to enable '
                'encoding.' )
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token ).split(' ' ) ) )
        return split_tokens
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        result = self.decoder.get(index , self.unk_token )
        return result
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        string = ' '.join(tokens )
        # make sure @@ tokens are concatenated
        string = ''.join(string.split(BPE_TOKEN_VOCAB ) )
return string
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        merges_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(vocab_file , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '\n' )
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file , 'w' , encoding='utf-8' ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
                        ' Please check that the tokenizer is not corrupted!' )
                    index = token_index
                writer.write(' '.join(bpe_tokens ) + '\n' )
                index += 1
        return (vocab_file, merges_file)
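# A tiny sketch of the "@@ " continuation convention that
# convert_tokens_to_string() above undoes (the tokens are illustrative):
# subword pieces carry a trailing "@@" marker and are re-joined by dropping
# the "@@ " separator.
_tokens = ["wir@@", "klich", "gut"]
_string = " ".join(_tokens)                # "wir@@ klich gut"
_string = "".join(_string.split("@@ "))    # "wirklich gut"
assert _string == "wirklich gut"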
| 707
|
"""simple docstring"""
import base64
def base64_encode( string: str ) -> bytes:
    '''simple docstring'''
    return base64.b64encode(string.encode('utf-8' ) )
def base64_decode( encoded_data: bytes ) -> str:
    '''simple docstring'''
    return base64.b64decode(encoded_data ).decode('utf-8' )
if __name__ == "__main__":
import doctest
doctest.testmod()
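# Round-trip sketch for the two helpers above (the sample text is illustrative):
assert base64_decode(base64_encode('hello world' ) ) == 'hello world'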
| 16
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'},
'tokenizer_file': {
'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'mobilebert-uncased': 512}
PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast( PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> Optional[Any]:
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[str]:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
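# A small standalone sketch of the input-building convention implemented
# above (the ids are illustrative stand-ins for real vocabulary indices):
# single sequences become [CLS] A [SEP]; pairs become [CLS] A [SEP] B [SEP]
# with segment ids 0 for the first part and 1 for the second.
_cls_id, _sep_id = 101, 102  # assumed BERT-style special token ids
_ids_a, _ids_b = [7, 8], [9]
_single = [_cls_id] + _ids_a + [_sep_id]
_pair = _single + _ids_b + [_sep_id]
_segment_ids = [0] * len(_single) + [1] * (len(_ids_b) + 1)
assert len(_pair) == len(_segment_ids)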
| 53
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "unispeech"
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , feat_quantizer_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.0_2 , layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.0_5 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , num_codevectors_per_group=320 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=100 , codevector_dim=256 , proj_codevector_dim=256 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , num_ctc_classes=80 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , replace_prob=0.5 , **kwargs , ) -> List[Any]:
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # pretraining loss
        self.replace_prob = replace_prob
@property
    def inputs_to_logits_ratio( self ) -> Optional[Any]:
return functools.reduce(operator.mul , self.conv_stride , 1 )
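# Quick standalone check of the property above: multiplying the default
# convolutional strides gives the feature extractor's downsampling factor,
# i.e. 5 * 2**6 = 320 input samples per output frame.
import functools
import operator

assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320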
| 436
| 0
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ['names', 'prefix']
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ['encoding_errors', 'on_bad_lines']
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ['date_format']
@dataclass
class CsvConfig( datasets.BuilderConfig ):
_lowerCamelCase : str = ","
_lowerCamelCase : Optional[str] = None
_lowerCamelCase : Optional[Union[int, List[int], str]] = "infer"
_lowerCamelCase : Optional[List[str]] = None
_lowerCamelCase : Optional[List[str]] = None
_lowerCamelCase : Optional[Union[int, str, List[int], List[str]]] = None
_lowerCamelCase : Optional[Union[List[int], List[str]]] = None
_lowerCamelCase : Optional[str] = None
_lowerCamelCase : bool = True
_lowerCamelCase : Optional[Literal["c", "python", "pyarrow"]] = None
_lowerCamelCase : Dict[Union[int, str], Callable[[Any], Any]] = None
_lowerCamelCase : Optional[list] = None
_lowerCamelCase : Optional[list] = None
_lowerCamelCase : bool = False
_lowerCamelCase : Optional[Union[int, List[int]]] = None
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : Optional[Union[str, List[str]]] = None
_lowerCamelCase : bool = True
_lowerCamelCase : bool = True
_lowerCamelCase : bool = False
_lowerCamelCase : bool = True
_lowerCamelCase : Optional[str] = None
_lowerCamelCase : str = "."
_lowerCamelCase : Optional[str] = None
_lowerCamelCase : str = '"'
_lowerCamelCase : int = 0
_lowerCamelCase : Optional[str] = None
_lowerCamelCase : Optional[str] = None
_lowerCamelCase : Optional[str] = None
_lowerCamelCase : Optional[str] = None
_lowerCamelCase : bool = True
_lowerCamelCase : bool = True
_lowerCamelCase : int = 0
_lowerCamelCase : bool = True
_lowerCamelCase : bool = False
_lowerCamelCase : Optional[str] = None
_lowerCamelCase : int = 10000
_lowerCamelCase : Optional[datasets.Features] = None
_lowerCamelCase : Optional[str] = "strict"
_lowerCamelCase : Literal["error", "warn", "skip"] = "error"
_lowerCamelCase : Optional[str] = None
    def __post_init__( self ):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
@property
    def pd_read_csv_kwargs( self ):
        pd_read_csv_kwargs = {
"""sep""": self.sep,
"""header""": self.header,
"""names""": self.names,
"""index_col""": self.index_col,
"""usecols""": self.usecols,
"""prefix""": self.prefix,
"""mangle_dupe_cols""": self.mangle_dupe_cols,
"""engine""": self.engine,
"""converters""": self.converters,
"""true_values""": self.true_values,
"""false_values""": self.false_values,
"""skipinitialspace""": self.skipinitialspace,
"""skiprows""": self.skiprows,
"""nrows""": self.nrows,
"""na_values""": self.na_values,
"""keep_default_na""": self.keep_default_na,
"""na_filter""": self.na_filter,
"""verbose""": self.verbose,
"""skip_blank_lines""": self.skip_blank_lines,
"""thousands""": self.thousands,
"""decimal""": self.decimal,
"""lineterminator""": self.lineterminator,
"""quotechar""": self.quotechar,
"""quoting""": self.quoting,
"""escapechar""": self.escapechar,
"""comment""": self.comment,
"""encoding""": self.encoding,
"""dialect""": self.dialect,
"""error_bad_lines""": self.error_bad_lines,
"""warn_bad_lines""": self.warn_bad_lines,
"""skipfooter""": self.skipfooter,
"""doublequote""": self.doublequote,
"""memory_map""": self.memory_map,
"""float_precision""": self.float_precision,
"""chunksize""": self.chunksize,
"""encoding_errors""": self.encoding_errors,
"""on_bad_lines""": self.on_bad_lines,
"""date_format""": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv( datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = CsvConfig
    def _info( self ):
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"""files""": files} ) )
        return splits
    def _cast_table( self , pa_table ):
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature ) for feature in self.config.features.values() ):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=schema )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table , schema )
        return pa_table
    def _generate_tables( self , files ):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature ) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            csv_file_reader = pd.read_csv(file , iterator=True , dtype=dtype , **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(csv_file_reader ):
                    pa_table = pa.Table.from_pandas(df )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table )
            except ValueError as e:
                logger.error(f"""Failed to read file '{file}' with error {type(e )}: {e}""" )
                raise
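# Hedged usage sketch of the builder above through the public API (the temp
# file is illustrative): datasets routes load_dataset("csv", ...) through
# this ArrowBasedBuilder, forwarding config fields such as `sep` to
# pandas.read_csv.
import tempfile
from datasets import load_dataset

with tempfile.NamedTemporaryFile("w", suffix=".csv", delete=False) as _f:
    _f.write("a,b\n1,x\n2,y\n")
_ds = load_dataset("csv", data_files=_f.name)["train"]
assert _ds.num_rows == 2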
| 720
|
def solve_simultaneous_equations( equation1: list[int] , equation2: list[int] ) -> tuple[float, float]:
    """simple docstring"""
    if not (len(equation1 ) == len(equation2 ) == 3):
        raise ValueError("""Please enter a valid equation.""" )
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("""Both a & b of two equations can't be zero.""" )
    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("""Infinite solutions. (Consistent system)""" )
        else:
            raise ValueError("""No solution. (Inconsistent system)""" )
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
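# Worked check of the solver above (the coefficients are illustrative): for
# x + y = 3 and x - y = 1 the determinants are -2, -4 and -2, giving
# (x, y) = (2.0, 1.0).
assert solve_simultaneous_equations([1, 1, 3] , [1, -1, 1] ) == (2.0, 1.0)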
| 403
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
__A = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins( root ):
    if root is None:
        return 0
    # Validation
    def count_nodes( node ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins( node ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root ) != count_coins(root ):
        raise ValueError("""The nodes number should be same as the number of coins""" )
    # Main calculation
    def get_distrib( node ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves, left_distrib_excess = get_distrib(node.left )
        right_distrib_moves, right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves , result_excess )
    return get_distrib(root )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
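# Worked example for the routine above (the tree is illustrative): a root
# holding 3 coins with two empty leaves needs one move per leaf, i.e. 2 moves.
example_root = TreeNode(3 , TreeNode(0 ) , TreeNode(0 ) )
assert distribute_coins(example_root ) == 2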
| 325
|
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp( self ) -> List[str]:
        '''simple docstring'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""[UNK]""",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """[UNK]"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
    def get_tokenizer( self , **kwargs ) -> str:
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ) -> List[Any]:
        '''simple docstring'''
        input_text = """lower newer"""
        output_text = """lower newer"""
        return input_text, output_text
    def test_full_tokenizer( self ) -> Optional[int]:
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        text = """lower newer"""
        bpe_tokens = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_token_type_ids( self ) -> Optional[Any]:
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("""Hello""" , """World""" )
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["""token_type_ids"""] , expected_token_type_ids )
@slow
    def test_sequence_builders( self ) -> Any:
        '''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
        text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
        text_2 = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
        encoded_text_from_decode = tokenizer.encode(
            """sequence builders""" , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            """sequence builders""" , """multi-sequence build""" , add_special_tokens=True , add_prefix_space=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
    def test_tokenizer_integration( self ) -> Tuple:
        '''simple docstring'''
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class )
        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("""microsoft/deberta-base""" )
            sequences = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
            encoding = tokenizer(sequences , padding=True )
            decoded_sequences = [tokenizer.decode(seq , skip_special_tokens=True ) for seq in encoding["""input_ids"""]]
            # fmt: off
            expected_encoding = {
"""input_ids""": [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
"""token_type_ids""": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
"""ALBERT: A Lite BERT for Self-supervised Learning of Language Representations""",
"""ALBERT incorporates two parameter reduction techniques""",
"""The first one is a factorized embedding parameterization. By decomposing the large vocabulary"""
""" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"""
""" vocabulary embedding.""",
]
            self.assertDictEqual(encoding.data , expected_encoding )
            for expected, decoded in zip(expected_decoded_sequence , decoded_sequences ):
                self.assertEqual(expected , decoded )
| 325
| 1
|
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase ):
    """simple docstring"""
    def test_download_only_flax( self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            pipeline = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=None , cache_dir=tmpdirname )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname , os.listdir(tmpdirname )[0] , "snapshots" ) )]
            files = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(".bin" ) for f in files )
@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase ):
    """simple docstring"""
    def test_dummy_all_tpus( self ):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=None )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0 )
        num_inference_steps = 4
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt )
        # shard inputs and rng
        params = replicate(params )
        prng_seed = jax.random.split(prng_seed , num_samples )
        prompt_ids = shard(prompt_ids )
        images = pipeline(prompt_ids , params , prng_seed , num_inference_steps , jit=True ).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 4.1514745 ) < 1e-3
            assert np.abs(np.abs(images , dtype=np.float32 ).sum() - 49947.875 ) < 5e-1
        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
        assert len(images_pil ) == num_samples
    def test_stable_diffusion_v1_4( self ):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=None )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0 )
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt )
        # shard inputs and rng
        params = replicate(params )
        prng_seed = jax.random.split(prng_seed , num_samples )
        prompt_ids = shard(prompt_ids )
        images = pipeline(prompt_ids , params , prng_seed , num_inference_steps , jit=True ).images
        assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.05652401) ) < 1e-3
            assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 2383808.2) ) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16( self ):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloat16 , safety_checker=None )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0 )
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt )
        # shard inputs and rng
        params = replicate(params )
        prng_seed = jax.random.split(prng_seed , num_samples )
        prompt_ids = shard(prompt_ids )
        images = pipeline(prompt_ids , params , prng_seed , num_inference_steps , jit=True ).images
        assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.04003906) ) < 1e-3
            assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 2373516.75) ) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_with_safety( self ):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloat16 )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0 )
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt )
        # shard inputs and rng
        params = replicate(params )
        prng_seed = jax.random.split(prng_seed , num_samples )
        prompt_ids = shard(prompt_ids )
        images = pipeline(prompt_ids , params , prng_seed , num_inference_steps , jit=True ).images
        assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.04003906) ) < 1e-3
            assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 2373516.75) ) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_ddim( self ):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , set_alpha_to_one=False , steps_offset=1 , )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloat16 , scheduler=scheduler , safety_checker=None , )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0 )
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt )
        # shard inputs and rng
        params = replicate(params )
        prng_seed = jax.random.split(prng_seed , num_samples )
        prompt_ids = shard(prompt_ids )
        images = pipeline(prompt_ids , params , prng_seed , num_inference_steps , jit=True ).images
        assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.045043945) ) < 1e-3
            assert np.abs((np.abs(images , dtype=np.float32 ).sum() - 2347693.5) ) < 5e-1
    def test_jax_memory_efficient_attention( self ):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0 ) , num_samples )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloat16 , safety_checker=None , )
        params = replicate(params )
        prompt_ids = pipeline.prepare_inputs(prompt )
        prompt_ids = shard(prompt_ids )
        images = pipeline(prompt_ids , params , prng_seed , jit=True ).images
        assert images.shape == (num_samples, 1, 5_12, 5_12, 3)
        slice = images[2, 0, 2_56, 10:17, 1]
        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloat16 , safety_checker=None , use_memory_efficient_attention=True , )
        params = replicate(params )
        prompt_ids = pipeline.prepare_inputs(prompt )
        prompt_ids = shard(prompt_ids )
        images_eff = pipeline(prompt_ids , params , prng_seed , jit=True ).images
        assert images_eff.shape == (num_samples, 1, 5_12, 5_12, 3)
        slice_eff = images_eff[2, 0, 2_56, 10:17, 1]
        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice ).max() < 1e-2
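# The tests above follow the standard Flax data-parallel recipe: replicate()
# adds a leading device axis to the parameters and shard() splits the batch
# across devices. A tiny standalone sketch of the same shape logic (the array
# contents are illustrative):
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

_n = jax.device_count()
_params = replicate({"w": jnp.ones((2, 2))})  # leading axis of size _n added
_batch = shard(jnp.zeros((_n * 4, 3)))        # (_n * 4, 3) -> (_n, 4, 3)
assert _batch.shape == (_n, 4, 3)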
| 521
|
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
lowerCAmelCase_ : Tuple = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
lowerCAmelCase_ : Union[str, Any] = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
lowerCAmelCase_ : Tuple = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric ):
    """simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"] , reference_urls=[
"https://en.wikipedia.org/wiki/ROUGE_(metric)",
"https://github.com/google-research/google-research/tree/master/rouge",
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
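# Minimal usage sketch for the scorer wrapped above (not part of the original
# file): rouge_scorer computes per-pair scores directly; the metric class adds
# batching and optional bootstrap aggregation on top of it.
if __name__ == "__main__":
    _scorer = rouge_scorer.RougeScorer(rouge_types=["rouge1"], use_stemmer=False)
    print(_scorer.score("hello there", "hello there"))  # perfect overlap -> precision/recall/f1 of 1.0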
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on
class MBart50Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source language setting: prefix=[src_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting: prefix=[tgt_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
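# Minimal usage sketch (not part of the original file). It assumes the
# checkpoint listed in PRETRAINED_VOCAB_FILES_MAP above is available on the
# Hub; the tokenizer prepends the source language code and appends </s>.
if __name__ == "__main__":
    tokenizer = MBart50Tokenizer.from_pretrained(
        "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
    )
    encoded = tokenizer("Hello, world!")
    print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))  # ['en_XX', ..., '</s>']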
'''simple docstring'''
import unittest
from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass
    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase_ : Union[str, Any] = {"input_ids": [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase_, model_name="microsoft/deberta-v2-xlarge", revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)

        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])

        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
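# A minimal standalone sketch of the JIT-consistency pattern used in
# test_jit_compilation above (not part of the original file): the same
# function is evaluated with and without XLA compilation and the results
# are compared.
if __name__ == "__main__":
    import jax
    import jax.numpy as jnp

    @jax.jit
    def scaled_sum(x):
        return (2.0 * x).sum()

    x = jnp.arange(4.0)
    with jax.disable_jit():
        eager = scaled_sum(x)
    jitted = scaled_sum(x)
    assert jnp.allclose(eager, jitted)  # compiled and eager paths agree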
'''simple docstring'''
def excel_column_to_number(column_title: str) -> int:
    """
    Given a string ``column_title`` that represents the column title in an
    Excel sheet, return its corresponding column number (A -> 1, Z -> 26,
    AA -> 27, ...).

    >>> excel_column_to_number("A")
    1
    >>> excel_column_to_number("AB")
    28
    >>> excel_column_to_number("ZY")
    701
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
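# Inverse sketch (not part of the original file): converts a column number
# back to its title, illustrating that column titles form a bijective
# base-26 numeral system with no zero digit (hence the "number - 1").
def number_to_excel_column(number: int) -> str:
    assert number > 0
    title = ""
    while number > 0:
        number, remainder = divmod(number - 1, 26)
        title = chr(65 + remainder) + title
    return title


assert number_to_excel_column(excel_column_to_number("AB")) == "AB"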
from __future__ import annotations
class IIRFilter:
    """N-order IIR filter, working on float samples."""

    def __init__(self, order: int) -> None:
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        """Set the a and b coefficients; a_0 defaults to 1.0 if omitted."""
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            error_msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(error_msg)

        if len(b_coeffs) != self.order + 1:
            error_msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(error_msg)

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        """Calculate y[n] = (b_0*x[n] + b_1*x[n-1] + ... - a_1*y[n-1] - ...) / a_0."""
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result

        return result
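# Usage sketch (not part of the original file): a first-order filter with
# a_1 = b_1 = 0 and b_0 = 0.5 simply halves each sample, which makes the
# recurrence easy to verify by hand.
if __name__ == "__main__":
    filt = IIRFilter(1)
    filt.set_coefficients([1.0, 0.0], [0.5, 0.0])
    print([filt.process(x) for x in (1.0, -1.0, 0.5)])  # [0.5, -0.5, 0.25]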
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """A loop exists when the exact same node appears more than once in the list."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
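# Alternative sketch (not part of the original file): Floyd's cycle
# detection finds a loop in O(1) extra space instead of the O(n) visited
# list used above, by advancing a slow and a fast pointer until they meet.
def has_loop_floyd(head: Node) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node  # one step
        fast = fast.next_node.next_node  # two steps
        if slow is fast:
            return True
    return False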
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }

        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)

        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
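# Sketch of how add_chinese_references is wired in (not part of the original
# script). The exact ref-file contents are an assumption: one JSON list per
# line, aligned with the dataset rows, consumed by DataCollatorForWholeWordMask
# through the "chinese_ref" column, e.g.:
#
#   ref file line 1:  [2, 3]       ->  row 0 gets {"chinese_ref": [2, 3]}
#   ref file line 2:  [1, 4, 5]    ->  row 1 gets {"chinese_ref": [1, 4, 5]}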
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")
    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )
    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid them being removed by the trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
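# Example invocation (a sketch, not part of the original script; the file
# names are placeholders). The ref files are only needed for Chinese
# whole-word masking:
#
#   python run_mlm_wwm.py \
#       --model_name_or_path bert-base-chinese \
#       --train_file train.txt \
#       --validation_file valid.txt \
#       --train_ref_file train_ref.txt \
#       --validation_ref_file valid_ref.txt \
#       --do_train --do_eval \
#       --output_dir ./output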
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # can't train a new tokenizer via the Tokenizers lib
    def test_training_new_tokenizer(self):
        pass

    # can't train a new tokenizer via the Tokenizers lib
    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
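
# Illustrative aside (not part of the original test): a minimal sketch of using
# the tokenizer under test directly. It assumes network access and the `rjieba`
# package; the checkpoint name matches the one used in the test above.
from transformers import RoFormerTokenizer

tokenizer = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
print(tokenizer.tokenize("今天天气非常好"))  # word-level pieces produced by the jieba-style pre-tokenizer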
| 70
|
"""simple docstring"""
def exchange_sort(numbers):
    """Sort a list in place by repeatedly exchanging out-of-order pairs."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[j], numbers[i] = numbers[i], numbers[j]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
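
# Illustrative aside (not part of the original module): exchange sort performs
# O(n^2) comparisons regardless of input order; a quick sanity check.
assert exchange_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert exchange_sort([-1, 0, 7, 7, 2]) == [-1, 0, 2, 7, 7]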
| 341
| 0
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
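
# Illustrative aside (not part of the original test): the score checked above
# is a total (summed) negative log-likelihood, recovered from the mean
# per-token loss by multiplying by the number of label tokens. The numbers
# below are made up for illustration only.
num_label_tokens = 6
mean_loss = 14.15  # hypothetical mean cross-entropy over the 6 label tokens
mtf_style_score = -(num_label_tokens * mean_loss)  # -84.9, same convention as EXPECTED_SCORE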
| 707
|
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)

BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")


class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}
        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
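
# Illustrative aside (not part of the original test): one way such
# test-to-tester mappings could be derived is by statically inspecting a test
# file with the `ast` module. This sketch relies purely on a naming-convention
# heuristic ("FooModelTest" -> "FooModelTester") and is NOT the actual
# implementation inside the `get_test_info` utility, which inspects class
# bodies (see e.g. "BlipTextImageModelsModelTester" above, which breaks the
# naming convention).
import ast


def sketch_test_to_tester(test_file_path):
    with open(test_file_path) as f:
        tree = ast.parse(f.read())
    class_names = {node.name for node in ast.walk(tree) if isinstance(node, ast.ClassDef)}
    return {
        name: name + "er"
        for name in class_names
        if name.endswith("ModelTest") and name + "er" in class_names
    }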
| 520
| 0
|
def __a ( __lowerCAmelCase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
SCREAMING_SNAKE_CASE : Any = set()
# Replace all the whitespace in our sentence
SCREAMING_SNAKE_CASE : List[str] = input_str.replace(' ' , '' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(__lowerCAmelCase ) == 26
def __a ( __lowerCAmelCase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
SCREAMING_SNAKE_CASE : Optional[Any] = [False] * 26
for char in input_str:
if char.islower():
SCREAMING_SNAKE_CASE : Any = True
elif char.isupper():
SCREAMING_SNAKE_CASE : Union[str, Any] = True
return all(__lowerCAmelCase )
def __a ( __lowerCAmelCase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def __a ( ) -> None:
from timeit import timeit
SCREAMING_SNAKE_CASE : Any = 'from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'
print(timeit('is_pangram()' , setup=__lowerCAmelCase ) )
print(timeit('is_pangram_faster()' , setup=__lowerCAmelCase ) )
print(timeit('is_pangram_fastest()' , setup=__lowerCAmelCase ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
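
# Illustrative aside (not part of the original module): all three variants
# agree on a quick spot check.
for func in (is_pangram, is_pangram_faster, is_pangram_fastest):
    assert func("The quick brown fox jumps over the lazy dog")
    assert not func("hello world")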
| 352
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_lowerCamelCase : Any = False
class lowercase ( unittest.TestCase):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class lowercase ( unittest.TestCase):
'''simple docstring'''
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
SCREAMING_SNAKE_CASE : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.dual_guided(
prompt='first prompt' , image=snake_case , text_to_image_strength=0.75 , generator=snake_case , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(snake_case )
SCREAMING_SNAKE_CASE : Any = VersatileDiffusionPipeline.from_pretrained(snake_case , torch_dtype=torch.floataa )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
SCREAMING_SNAKE_CASE : List[str] = generator.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.dual_guided(
prompt='first prompt' , image=snake_case , text_to_image_strength=0.75 , generator=snake_case , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
SCREAMING_SNAKE_CASE : Union[str, Any] = 'cyberpunk 2077'
SCREAMING_SNAKE_CASE : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = pipe.dual_guided(
prompt=snake_case , image=snake_case , text_to_image_strength=0.75 , generator=snake_case , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
SCREAMING_SNAKE_CASE : List[Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Tuple = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE : Any = 'A painting of a squirrel eating a burger '
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = pipe.text_to_image(
prompt=snake_case , generator=snake_case , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
SCREAMING_SNAKE_CASE : Dict = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : List[Any] = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE : Optional[Any] = pipe.image_variation(snake_case , generator=snake_case , output_type='numpy' ).images
SCREAMING_SNAKE_CASE : Optional[int] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 352
| 1
|
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    """Map an activation-function name to the corresponding ``torch.nn`` module."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
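
# Illustrative aside (not part of the original module): the returned object is
# a regular nn.Module and can be applied directly to tensors.
import torch

act = get_activation("gelu")
x = torch.linspace(-2.0, 2.0, steps=5)
print(act(x))  # GELU is ~0 for large negative inputs and ~x for large positive ones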
| 139
|
def partition(m: int) -> int:
    # memo[n][k] counts the partitions of n built from the first k part sizes
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
| 139
| 1
|
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def downscale_height_and_width(height: int, width: int, scale_factor: int = 8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
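
# Illustrative aside (not part of the original pipeline): the function maps the
# requested output size to the latent resolution used by the UNet (output size
# divided by the movq factor of 8, after rounding the output up to the next
# multiple of 64).
assert downscale_height_and_width(768, 768) == (96, 96)   # 768 is already divisible by 64
assert downscale_height_and_width(769, 768) == (104, 96)  # 769 rounds up to the next latent row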
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation using Kandinsky 2.2 with ControlNet guidance.
    """

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
def UpperCAmelCase__ ( self :str , lowerCamelCase_ :str , lowerCamelCase_ :Tuple , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Union[str, Any] ):
"""simple docstring"""
if latents is None:
lowerCamelCase__ : List[str] =randn_tensor(lowerCamelCase_ , generator=lowerCamelCase_ , device=lowerCamelCase_ , dtype=lowerCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
lowerCamelCase__ : Any =latents.to(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] =latents * scheduler.init_noise_sigma
return latents
def UpperCAmelCase__ ( self :Optional[Any] , lowerCamelCase_ :str=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowerCamelCase__ : Optional[Any] =torch.device(f"""cuda:{gpu_id}""" )
lowerCamelCase__ : Union[str, Any] =[
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCamelCase_ , lowerCamelCase_ )
def UpperCAmelCase__ ( self :Dict , lowerCamelCase_ :str=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
lowerCamelCase__ : Tuple =torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=lowerCamelCase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowerCamelCase__ : int =None
for cpu_offloaded_model in [self.unet, self.movq]:
lowerCamelCase__ , lowerCamelCase__ : Optional[int] =cpu_offload_with_hook(lowerCamelCase_ , lowerCamelCase_ , prev_module_hook=lowerCamelCase_ )
# We'll offload the last model manually.
lowerCamelCase__ : Tuple =hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCamelCase_ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__( self :Any , lowerCamelCase_ :Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCamelCase_ :Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :int = 512 , lowerCamelCase_ :int = 512 , lowerCamelCase_ :int = 100 , lowerCamelCase_ :float = 4.0 , lowerCamelCase_ :int = 1 , lowerCamelCase_ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase_ :Optional[torch.FloatTensor] = None , lowerCamelCase_ :Optional[str] = "pil" , lowerCamelCase_ :bool = True , ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] =self._execution_device
lowerCamelCase__ : Any =guidance_scale > 1.0
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
lowerCamelCase__ : str =torch.cat(lowerCamelCase_ , dim=0 )
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
lowerCamelCase__ : Dict =torch.cat(lowerCamelCase_ , dim=0 )
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
lowerCamelCase__ : Union[str, Any] =torch.cat(lowerCamelCase_ , dim=0 )
lowerCamelCase__ : Dict =image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
lowerCamelCase__ : int =image_embeds.repeat_interleave(lowerCamelCase_ , dim=0 )
lowerCamelCase__ : Optional[Any] =negative_image_embeds.repeat_interleave(lowerCamelCase_ , dim=0 )
lowerCamelCase__ : List[Any] =hint.repeat_interleave(lowerCamelCase_ , dim=0 )
lowerCamelCase__ : int =torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCamelCase_ )
lowerCamelCase__ : str =torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCamelCase_ )
self.scheduler.set_timesteps(lowerCamelCase_ , device=lowerCamelCase_ )
lowerCamelCase__ : List[Any] =self.scheduler.timesteps
lowerCamelCase__ : Dict =self.movq.config.latent_channels
lowerCamelCase__ , lowerCamelCase__ : int =downscale_height_and_width(lowerCamelCase_ , lowerCamelCase_ , self.movq_scale_factor )
# create initial latent
lowerCamelCase__ : Dict =self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowerCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase__ : Any =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase__ : List[Any] ={'image_embeds': image_embeds, 'hint': hint}
lowerCamelCase__ : Union[str, Any] =self.unet(
sample=lowerCamelCase_ , timestep=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , added_cond_kwargs=lowerCamelCase_ , return_dict=lowerCamelCase_ , )[0]
if do_classifier_free_guidance:
lowerCamelCase__ , lowerCamelCase__ : Optional[int] =noise_pred.split(latents.shape[1] , dim=1 )
lowerCamelCase__ , lowerCamelCase__ : int =noise_pred.chunk(2 )
lowerCamelCase__ , lowerCamelCase__ : List[Any] =variance_pred.chunk(2 )
lowerCamelCase__ : Dict =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowerCamelCase__ : Union[str, Any] =torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowerCamelCase__ , lowerCamelCase__ : List[str] =noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase__ : Any =self.scheduler.step(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ , )[0]
# post-processing
lowerCamelCase__ : Dict =self.movq.decode(lowerCamelCase_ , force_not_quantize=lowerCamelCase_ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowerCamelCase__ : List[str] =image * 0.5 + 0.5
lowerCamelCase__ : Any =image.clamp(0 , 1 )
lowerCamelCase__ : Any =image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCamelCase__ : Union[str, Any] =self.numpy_to_pil(lowerCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase_ )
| 174
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
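
# Illustrative aside (not part of the original conversion script): timm stores
# query, key and value as one fused (3*hidden, hidden) matrix, and the code
# above slices it into three (hidden, hidden) blocks. A toy-sized demo:
import torch

hidden = 4
in_proj_weight = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
query = in_proj_weight[:hidden, :]
key = in_proj_weight[hidden : 2 * hidden, :]
value = in_proj_weight[-hidden:, :]
assert torch.equal(torch.cat([query, key, value]), in_proj_weight)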
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=False
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 174
| 1
|
from __future__ import annotations

import numpy as np


def relu(vector: list[float]):
    """Apply the rectified linear unit element-wise: max(0, x)."""
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
| 647
|
def max_product_subarray(numbers: list[int]) -> int:
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
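
# Illustrative aside (not part of the original module): the min/max swap on a
# negative element is what lets a very negative prefix flip into a large
# positive product later.
assert max_product_subarray([2, 3, -2, 4]) == 6   # best subarray is [2, 3]
assert max_product_subarray([-2, 0, -1]) == 0
assert max_product_subarray([-2, 3, -4]) == 24    # the two negatives cancel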
| 647
| 1
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text(text: str, n=100, character=" ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


def main(rag_example_args, processing_args, index_hnsw_args):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index


@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default="What does Moses' rod turn into ?",
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
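
# Illustrative aside (not part of the original script): a minimal sketch of
# querying the saved dataset + index. The paths mirror the names written by
# main() above; the question-encoder checkpoint is an assumption, pick any DPR
# question encoder that matches your context encoder.
#
# from datasets import load_from_disk
# from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast
#
# dataset = load_from_disk("path/to/my_knowledge_dataset")
# dataset.load_faiss_index("embeddings", "path/to/my_knowledge_dataset_hnsw_index.faiss")
# q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
# q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
# question_emb = q_encoder(**q_tokenizer("What does Moses' rod turn into ?", return_tensors="pt"))[0][0].detach().numpy()
# scores, passages = dataset.get_nearest_examples("embeddings", question_emb, k=5)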
| 432
|
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
SCREAMING_SNAKE_CASE_ = ""
SCREAMING_SNAKE_CASE_ = ""
SCREAMING_SNAKE_CASE_ = ""
SCREAMING_SNAKE_CASE_ = 1 # (0 is vertical, 1 is horizontal)
def lowerCamelCase__ ( ) -> None:
"""simple docstring"""
_snake_case , _snake_case : Optional[Any] = get_dataset(a__ , a__)
print('Processing...')
_snake_case , _snake_case , _snake_case : Tuple = update_image_and_anno(a__ , a__ , a__)
for index, image in enumerate(a__):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_snake_case : List[str] = random_chars(3_2)
_snake_case : List[str] = paths[index].split(os.sep)[-1].rsplit('.' , 1)[0]
_snake_case : Optional[Any] = F"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(F"""/{file_root}.jpg""" , a__ , [cva.IMWRITE_JPEG_QUALITY, 8_5])
print(F"""Success {index+1}/{len(a__)} with {file_name}""")
_snake_case : str = []
for anno in new_annos[index]:
_snake_case : Union[str, Any] = F"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(a__)
with open(F"""/{file_root}.txt""" , 'w') as outfile:
outfile.write('\n'.join(line for line in annos_list))
def lowerCamelCase__ ( a__ , a__) -> tuple[list, list]:
"""simple docstring"""
_snake_case : Optional[int] = []
_snake_case : int = []
for label_file in glob.glob(os.path.join(a__ , '*.txt')):
_snake_case : List[str] = label_file.split(os.sep)[-1].rsplit('.' , 1)[0]
with open(a__) as in_file:
_snake_case : Union[str, Any] = in_file.readlines()
_snake_case : str = os.path.join(a__ , F"""{label_name}.jpg""")
_snake_case : Dict = []
for obj_list in obj_lists:
_snake_case : Dict = obj_list.rstrip('\n').split(' ')
boxes.append(
[
int(obj[0]),
float(obj[1]),
float(obj[2]),
float(obj[3]),
float(obj[4]),
])
if not boxes:
continue
img_paths.append(a__)
labels.append(a__)
return img_paths, labels
def lowerCamelCase__ ( a__ , a__ , a__ = 1) -> tuple[list, list, list]:
"""simple docstring"""
_snake_case : Dict = []
_snake_case : List[str] = []
_snake_case : Optional[int] = []
for idx in range(len(a__)):
_snake_case : Any = []
_snake_case : str = img_list[idx]
path_list.append(a__)
_snake_case : List[Any] = anno_list[idx]
_snake_case : Union[str, Any] = cva.imread(a__)
if flip_type == 1:
_snake_case : Optional[int] = cva.flip(a__ , a__)
for bbox in img_annos:
_snake_case : List[str] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
elif flip_type == 0:
_snake_case : Tuple = cva.flip(a__ , a__)
for bbox in img_annos:
_snake_case : List[str] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
new_annos_lists.append(a__)
new_imgs_list.append(a__)
return new_imgs_list, new_annos_lists, path_list
def lowerCamelCase__ ( a__ = 3_2) -> str:
"""simple docstring"""
assert number_char > 1, "The number of character should greater than 1"
_snake_case : int = ascii_lowercase + digits
return "".join(random.choice(a__) for _ in range(a__))
if __name__ == "__main__":
main()
print("DONE ✅")
| 517
| 0
|
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs a CLAP feature extractor that turns raw audio into (fused) log-mel spectrograms.
    """

    model_input_names = ["input_features", "is_longer"]

    def __init__(self, feature_size=64, sampling_rate=48_000, hop_length=480, max_length_s=10, fft_window_size=1024, padding_value=0.0, return_attention_mask=False, frequency_min: float = 0, frequency_max: float = 14_000, top_db: int = None, truncation: str = "fusion", padding: str = "repeatpad", **kwargs):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale="htk")
        self.mel_filters_slaney = mel_filter_bank(num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney")
def _lowerCamelCase ( self : Optional[Any] ):
__UpperCamelCase = copy.deepcopy(self.__dict__ )
__UpperCamelCase = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _lowerCamelCase ( self : Dict , __A : np.array , __A : Optional[np.array] = None ):
__UpperCamelCase = spectrogram(
__A , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=__A , log_mel='dB' , )
return log_mel_spectrogram.T
def _lowerCamelCase ( self : List[Any] , __A : str , __A : Dict , __A : int ):
__UpperCamelCase = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
__UpperCamelCase = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
__UpperCamelCase = [0]
# randomly choose index for each part
__UpperCamelCase = np.random.choice(ranges[0] )
__UpperCamelCase = np.random.choice(ranges[1] )
__UpperCamelCase = np.random.choice(ranges[2] )
__UpperCamelCase = mel[idx_front : idx_front + chunk_frames, :]
__UpperCamelCase = mel[idx_middle : idx_middle + chunk_frames, :]
__UpperCamelCase = mel[idx_back : idx_back + chunk_frames, :]
__UpperCamelCase = torch.tensor(mel[None, None, :] )
__UpperCamelCase = torch.nn.functional.interpolate(
__A , size=[chunk_frames, 6_4] , mode='bilinear' , align_corners=__A )
__UpperCamelCase = mel_shrink[0][0].numpy()
__UpperCamelCase = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def _lowerCamelCase ( self : Union[str, Any] , __A : np.array , __A : List[str] , __A : Dict , __A : Union[str, Any] ):
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
__UpperCamelCase = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
__UpperCamelCase = len(__A ) - max_length
__UpperCamelCase = np.random.randint(0 , overflow + 1 )
__UpperCamelCase = waveform[idx : idx + max_length]
__UpperCamelCase = self._np_extract_fbank_features(__A , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
__UpperCamelCase = self._np_extract_fbank_features(__A , self.mel_filters )
__UpperCamelCase = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
__UpperCamelCase = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
__UpperCamelCase = np.stack([mel, mel, mel, mel] , axis=0 )
__UpperCamelCase = False
else:
__UpperCamelCase = self._random_mel_fusion(__A , __A , __A )
__UpperCamelCase = True
else:
raise NotImplementedError(f'''data_truncating {truncation} not implemented''' )
else:
__UpperCamelCase = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
__UpperCamelCase = int(max_length / len(__A ) )
__UpperCamelCase = np.stack(np.tile(__A , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
__UpperCamelCase = int(max_length / len(__A ) )
__UpperCamelCase = np.stack(np.tile(__A , __A ) )
__UpperCamelCase = np.pad(__A , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
if truncation == "fusion":
__UpperCamelCase = self._np_extract_fbank_features(__A , self.mel_filters )
__UpperCamelCase = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
__UpperCamelCase = self._np_extract_fbank_features(__A , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : int , __A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __A : str = None , __A : Optional[str] = None , __A : Optional[int] = None , __A : Optional[int] = None , __A : Optional[Union[str, TensorType]] = None , **__A : Dict , ):
__UpperCamelCase = truncation if truncation is not None else self.truncation
__UpperCamelCase = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
__UpperCamelCase = isinstance(__A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
__UpperCamelCase = is_batched_numpy or (
isinstance(__A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__UpperCamelCase = [np.asarray(__A , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__A , np.ndarray ):
__UpperCamelCase = np.asarray(__A , dtype=np.floataa )
elif isinstance(__A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__UpperCamelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__UpperCamelCase = [np.asarray(__A )]
# convert to mel spectrogram, truncate and pad if needed.
__UpperCamelCase = [
self._get_input_mel(__A , max_length if max_length else self.nb_max_samples , __A , __A )
for waveform in raw_speech
]
__UpperCamelCase = []
__UpperCamelCase = []
for mel, longer in padded_inputs:
input_mel.append(__A )
is_longer.append(__A )
if truncation == "fusion" and sum(__A ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
__UpperCamelCase = np.random.randint(0 , len(__A ) )
__UpperCamelCase = True
if isinstance(input_mel[0] , __A ):
__UpperCamelCase = [np.asarray(__A , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
__UpperCamelCase = [[longer] for longer in is_longer]
__UpperCamelCase = {'input_features': input_mel, 'is_longer': is_longer}
__UpperCamelCase = BatchFeature(__A )
if return_tensors is not None:
__UpperCamelCase = input_features.convert_to_tensors(__A )
return input_features
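
# Illustrative aside (not part of the original module): a minimal sketch of the
# intended usage, running the extractor on one second of silence. The exact
# output shape is an assumption derived from the defaults above (64 mel bins,
# 48 kHz sampling, hop length 480, "fusion" truncation).
import numpy as np

extractor = ClapFeatureExtractor()
features = extractor(np.zeros(48_000, dtype=np.float32), sampling_rate=48_000, return_tensors="np")
print(features["input_features"].shape)  # 4 fused mel chunks of 64 bins each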
| 434
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65_024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
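
# Illustrative aside (not part of the original module): the derived properties
# follow directly from the configured sizes.
config = FalconConfig()  # defaults correspond to the 7B-style architecture above
assert config.head_dim == 64  # 4544 hidden size / 71 attention heads
assert config.rotary  # rotary embeddings are used whenever alibi is disabled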
| 434
| 1
|
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class _lowercase ( __lowerCamelCase ):
def UpperCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
A_ = SMALL_MODEL_IDENTIFIER
A_ = '''pt'''
A_ = '''tf'''
def UpperCamelCase ( self : Optional[int] , lowerCamelCase__ : int ) -> Any:
"""simple docstring"""
A_ = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(lowerCamelCase__ )
def UpperCamelCase ( self : Optional[Any] , lowerCamelCase__ : Optional[int] ) -> Tuple:
"""simple docstring"""
A_ = TFAutoModel.from_pretrained(self.test_model , from_pt=lowerCamelCase__ )
model_tf.save_pretrained(lowerCamelCase__ )
    def test_framework_provided(self):
        """simple docstring"""
        mock_framework = "mock_framework"
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
    def test_checkpoint_lookup(self):
        """simple docstring"""
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)
    def test_from_environment(self):
        """simple docstring"""
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
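# A hedged sketch of what the tests above exercise (illustrative, not part of the
# original file): determine_framework resolves a checkpoint to one of the two
# framework codes asserted throughout this class.
#
#   framework = FeaturesManager.determine_framework(SMALL_MODEL_IDENTIFIER)
#   assert framework in ("pt", "tf")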
| 203
|
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = TFDebertaVaModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs_list = [input_ids, input_mask]
        result = model(inputs)
        result = model(inputs_list)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = TFDebertaVaForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """simple docstring"""
        model = TFDebertaVaForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaVaModel,
            "fill-mask": TFDebertaVaForMaskedLM,
            "question-answering": TFDebertaVaForQuestionAnswering,
            "text-classification": TFDebertaVaForSequenceClassification,
            "token-classification": TFDebertaVaForTokenClassification,
            "zero-shot": TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self) -> None:
        """simple docstring"""
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        """simple docstring"""
        pass

    @slow
    def test_inference_no_head(self):
        """simple docstring"""
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| 203
| 1
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    """simple docstring"""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    """simple docstring"""
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    state_dict = mam_aaa["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
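# Example invocation of the script above. The file name and paths are illustrative,
# not taken from the source; the two positional arguments match the parser:
#
#   python convert_m2m100_checkpoint.py /path/to/fairseq/model.pt ./m2m100-pytorch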
| 713
|
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, input_mask, token_labels

    def get_config(self):
        """simple docstring"""
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        """simple docstring"""
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        """simple docstring"""
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        """simple docstring"""
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        """simple docstring"""
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        """simple docstring"""
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids with them
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append the new tokens to input_ids and the attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
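# The cache-consistency idea behind create_and_check_decoder_model_past_large_inputs
# above, in miniature (illustrative pseudocode, not part of the original file): a
# cached forward over only the new tokens must match a full forward over the
# concatenated sequence.
#
#   full = model(torch.cat([ids, new_ids], dim=-1), output_hidden_states=True)
#   cached = model(new_ids, past_key_values=cache, output_hidden_states=True)
#   torch.allclose(full["hidden_states"][0][:, -3:], cached["hidden_states"][0], atol=1e-3)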
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        """simple docstring"""
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_model(self):
        """simple docstring"""
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        """simple docstring"""
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        """simple docstring"""
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        """simple docstring"""
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        """simple docstring"""
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_pythia_410m_deduped_generation(self):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"
            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]
            self.assertEqual(output_str, expected_output)
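# A minimal greedy-decoding sketch distilled from the integration test above. The
# model id, prompt, and max_new_tokens all come from that test; running it outside
# the test harness is an assumption:
#
#   tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
#   model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")
#   inputs = tokenizer("My favorite food is", return_tensors="pt")
#   output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
#   print(tokenizer.batch_decode(output_ids)[0])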
| 163
| 0
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class SageMakerModelParallelTest(unittest.TestCase):
    def setUp(self) -> None:
        """simple docstring"""
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count=1) -> "HuggingFace":
        """simple docstring"""
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name) -> None:
        """simple docstring"""
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(1,)])
    def test_script(self, instance_count) -> None:
        """simple docstring"""
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 607
|
import os
def solution() -> str:
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
| 16
| 0
|
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
_default_log_level = logging.WARNING
def _get_default_logging_level() -> int:
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info() -> None:
    return set_verbosity(INFO)


def set_verbosity_warning() -> None:
    return set_verbosity(WARNING)


def set_verbosity_debug() -> None:
    return set_verbosity(DEBUG)


def set_verbosity_error() -> None:
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, attr):
        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar() -> None:
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar() -> None:
    global _tqdm_active
    _tqdm_active = False
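# A short usage sketch of the helpers defined above (illustrative, not part of the
# original module):
#
#   logger = get_logger(__name__)
#   set_verbosity_info()
#   logger.info("effective level: %d", get_verbosity())
#   disable_progress_bar()  # makes the exported `tqdm` wrapper a no-op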
| 713
|
"""simple docstring"""
from datetime import datetime as dt
import os
from github import Github
A__ : int = [
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
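# This script is designed for a scheduled CI job; an illustrative GitHub Actions
# trigger (an assumption, not part of this file):
#
#   on:
#     schedule:
#       - cron: "0 15 * * *"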
| 244
| 0
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    """simple docstring"""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class snake_case :
'''simple docstring'''
def _lowercase ( self : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str ) -> Optional[int]:
"""simple docstring"""
pass
def _lowercase ( self : Any ) -> int:
"""simple docstring"""
pass
def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        """simple docstring"""
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
def _lowercase ( self : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int]=None , **lowerCAmelCase_ : List[str] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderConfig.from_vision_text_configs(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel(_UpperCamelCase )
SCREAMING_SNAKE_CASE_ = model(input_ids=_UpperCamelCase , pixel_values=_UpperCamelCase , attention_mask=_UpperCamelCase )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) )
def _lowercase ( self : Optional[int] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.get_vision_text_model(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE_ = {'''vision_model''': vision_model, '''text_model''': text_model}
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_UpperCamelCase )
SCREAMING_SNAKE_CASE_ = model(input_ids=_UpperCamelCase , pixel_values=_UpperCamelCase , attention_mask=_UpperCamelCase )
self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _lowercase ( self : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int=None , **lowerCAmelCase_ : Union[str, Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.get_vision_text_model(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE_ = {'''vision_model''': vision_model, '''text_model''': text_model}
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_UpperCamelCase )
SCREAMING_SNAKE_CASE_ = model(input_ids=_UpperCamelCase , pixel_values=_UpperCamelCase , attention_mask=_UpperCamelCase )
SCREAMING_SNAKE_CASE_ = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE_ = model(input_ids=_UpperCamelCase , pixel_values=_UpperCamelCase , attention_mask=_UpperCamelCase )
SCREAMING_SNAKE_CASE_ = after_output[0]
SCREAMING_SNAKE_CASE_ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_UpperCamelCase , 1e-3 )
def _lowercase ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict=None , **lowerCAmelCase_ : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.get_vision_text_model(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE_ = {'''vision_model''': vision_model, '''text_model''': text_model}
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_UpperCamelCase )
SCREAMING_SNAKE_CASE_ = model(
input_ids=_UpperCamelCase , pixel_values=_UpperCamelCase , attention_mask=_UpperCamelCase , output_attentions=_UpperCamelCase )
SCREAMING_SNAKE_CASE_ = output.vision_model_output.attentions
self.assertEqual(len(_UpperCamelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE_ = to_atuple(vision_model.config.image_size )
SCREAMING_SNAKE_CASE_ = to_atuple(vision_model.config.patch_size )
SCREAMING_SNAKE_CASE_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
SCREAMING_SNAKE_CASE_ = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
SCREAMING_SNAKE_CASE_ = output.text_model_output.attentions
self.assertEqual(len(_UpperCamelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _lowercase ( self : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] ) -> int:
"""simple docstring"""
pt_model.to(_UpperCamelCase )
pt_model.eval()
# prepare inputs
SCREAMING_SNAKE_CASE_ = inputs_dict
SCREAMING_SNAKE_CASE_ = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = pt_model(**_UpperCamelCase ).to_tuple()
SCREAMING_SNAKE_CASE_ = fx_model(**_UpperCamelCase ).to_tuple()
self.assertEqual(len(_UpperCamelCase ) , len(_UpperCamelCase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(_UpperCamelCase , pt_output.numpy() , 4e-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_pretrained(_UpperCamelCase , from_pt=_UpperCamelCase )
SCREAMING_SNAKE_CASE_ = fx_model_loaded(**_UpperCamelCase ).to_tuple()
self.assertEqual(len(_UpperCamelCase ) , len(_UpperCamelCase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(_UpperCamelCase , pt_output.numpy() , 4e-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderModel.from_pretrained(_UpperCamelCase , from_flax=_UpperCamelCase )
pt_model_loaded.to(_UpperCamelCase )
pt_model_loaded.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = pt_model_loaded(**_UpperCamelCase ).to_tuple()
self.assertEqual(len(_UpperCamelCase ) , len(_UpperCamelCase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(_UpperCamelCase , pt_output_loaded.numpy() , 4e-2 )
def _lowercase ( self : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderConfig.from_vision_text_configs(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderModel(_UpperCamelCase )
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel(_UpperCamelCase )
SCREAMING_SNAKE_CASE_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _UpperCamelCase )
SCREAMING_SNAKE_CASE_ = fx_state
self.check_pt_flax_equivalence(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def _lowercase ( self : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : str ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderConfig.from_vision_text_configs(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderModel(_UpperCamelCase )
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel(_UpperCamelCase )
SCREAMING_SNAKE_CASE_ = load_flax_weights_in_pytorch_model(_UpperCamelCase , fx_model.params )
self.check_pt_flax_equivalence(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def _lowercase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_UpperCamelCase )
def _lowercase ( self : Dict ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_UpperCamelCase )
def _lowercase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
self.check_save_load(**_UpperCamelCase )
def _lowercase ( self : str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_UpperCamelCase )
@is_pt_flax_cross_test
def _lowercase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ = config_inputs_dict.pop('''vision_config''' )
SCREAMING_SNAKE_CASE_ = config_inputs_dict.pop('''text_config''' )
SCREAMING_SNAKE_CASE_ = config_inputs_dict
self.check_equivalence_pt_to_flax(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
self.check_equivalence_flax_to_pt(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
@slow
def _lowercase ( self : List[str] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.get_pretrained_model_and_inputs()
SCREAMING_SNAKE_CASE_ = model_a(**_UpperCamelCase )
SCREAMING_SNAKE_CASE_ = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE_ = model_a(**_UpperCamelCase )
SCREAMING_SNAKE_CASE_ = after_outputs[0]
SCREAMING_SNAKE_CASE_ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_UpperCamelCase , 1e-5 )
@require_flax
class snake_case ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self : Any ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=_UpperCamelCase , text_from_pt=_UpperCamelCase , )
SCREAMING_SNAKE_CASE_ = 13
SCREAMING_SNAKE_CASE_ = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
SCREAMING_SNAKE_CASE_ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
SCREAMING_SNAKE_CASE_ = random_attention_mask([batch_size, 4] )
SCREAMING_SNAKE_CASE_ = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def _lowercase ( self : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = FlaxViTModel(_UpperCamelCase )
SCREAMING_SNAKE_CASE_ = FlaxBertModel(_UpperCamelCase )
return vision_model, text_model
    def prepare_config_and_inputs(self):
        """simple docstring"""
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class snake_case ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self : int ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=_UpperCamelCase , text_from_pt=_UpperCamelCase , )
SCREAMING_SNAKE_CASE_ = 13
SCREAMING_SNAKE_CASE_ = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
SCREAMING_SNAKE_CASE_ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
SCREAMING_SNAKE_CASE_ = random_attention_mask([batch_size, 4] )
SCREAMING_SNAKE_CASE_ = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def _lowercase ( self : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = FlaxCLIPVisionModel(_UpperCamelCase )
SCREAMING_SNAKE_CASE_ = FlaxBertModel(_UpperCamelCase )
return vision_model, text_model
    def prepare_config_and_inputs(self):
        """simple docstring"""
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowercase ( self : str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' , logit_scale_init_value=1.0 )
SCREAMING_SNAKE_CASE_ = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
SCREAMING_SNAKE_CASE_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
SCREAMING_SNAKE_CASE_ = processor(
text=['''una foto di un gatto''', '''una foto di un cane'''] , images=_UpperCamelCase , padding=_UpperCamelCase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE_ = model(**_UpperCamelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
SCREAMING_SNAKE_CASE_ = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , _UpperCamelCase , atol=1e-3 ) )
| 393
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
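# A self-contained sketch of the lazy-import pattern used above: the module object
# defers the heavy submodule import until an exported name is first accessed.
# LazyModule below is an illustrative stand-in, not transformers' actual _LazyModule.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported name back to the submodule that defines it
        self._name_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._name_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._name_to_submodule[attr]}")
        return getattr(submodule, attr)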
| 403
| 0
|
"""simple docstring"""
MOD_ADLER = 65_521


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of a string."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
| 494
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 494
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'ResNetForImageClassification',
'ResNetModel',
'ResNetPreTrainedModel',
'ResNetBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFResNetForImageClassification',
'TFResNetModel',
'TFResNetPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
'FlaxResNetForImageClassification',
'FlaxResNetModel',
'FlaxResNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 320
|
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")


class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of forward references this node holds."""
        return len(self.forward)


class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Draw a level for a new node: each additional level has probability `p`."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        update_vector = []
        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: VT) -> VT | None:
        node, _ = self._locate_node(key)

        if node is not None:
            return node.value

        return None
def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()


def main():
    """
    >>> pytests()
    """
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
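# Illustrative usage sketch (added; not part of the original module):
# >>> sl = SkipList()
# >>> sl.insert("b", 2); sl.insert("a", 1)
# >>> sl.find("a")
# 1
# >>> sorted(sl) == list(sl)  # iteration already yields keys in sorted order
# True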
| 565
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    """simple docstring"""

    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
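# Hedged usage sketch (added for illustration):
# >>> config = ResNetConfig(depths=[2, 2, 2, 2], layer_type="basic")  # ResNet-18-style
# >>> config.stage_names
# ['stem', 'stage1', 'stage2', 'stage3', 'stage4']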
| 565
|
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
SPIECE_UNDERLINE = "▁"
class T5Tokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs=None,
        legacy=True,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids):
        """Do not add eos again if the user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text, **kwargs):
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
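# Illustrative note (added): sentinel tokens map to ids counting down from the
# top of the vocabulary, so with the default extra_ids=100:
# >>> tok = T5Tokenizer.from_pretrained("t5-small")
# >>> tok.convert_tokens_to_ids("<extra_id_0>") == tok.vocab_size - 1
# True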
| 565
| 1
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool(PipelineTool):
    """simple docstring"""

    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
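# Hypothetical usage sketch (added; input and output below are placeholders):
# >>> summarizer = TextSummarizationTool()
# >>> summarizer("<a long English text>")
# '<a one-sentence summary>'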
| 636
|
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
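# Example invocation (added; the flags are the ones defined above, the script
# name and paths are hypothetical):
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet_pytorch \
#       --finetuning_task sts-b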
| 249
| 0
|
"""simple docstring"""
g = 9.80665  # standard gravity, in m/s^2


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Buoyant force on a submerged object: F = fluid_density * gravity * volume."""
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume
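# Worked example (added for illustration): a fully submerged 0.002 m^3 object
# in fresh water (1000 kg/m^3) experiences 1000 * 9.80665 * 0.002 ≈ 19.61 N
# of buoyant force:
# >>> round(archimedes_principle(fluid_density=1000, volume=0.002), 2)
# 19.61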
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 558
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
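# Hedged usage sketch (added): with the default conv_stride the model downsamples
# raw audio by the product of the strides, 5 * 2**6 = 320:
# >>> SEWDConfig().inputs_to_logits_ratio
# 320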
| 558
| 1
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Tokenize one dataset example and record its characters-per-token ratio."""
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"""Dataset loaded in {time.time()-t_start:.2f}s""")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(f"""Dataset tokenized in {time.time()-t_start:.2f}s""")
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 169
|
"""simple docstring"""
def solution(pence: int = 200) -> int:
    """Count the ways to make `pence` pence out of standard UK coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
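# Sanity check on a tiny input (added for illustration): solution(5) counts
# {5}, {2,2,1}, {2,1,1,1}, {1,1,1,1,1} -> 4 ways.
# >>> solution(5)
# 4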
| 169
| 1
|
'''simple docstring'''
import warnings
warnings.warn(
    'memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '
'`from accelerate import find_executable_batch_size` to avoid this warning.',
FutureWarning,
)
| 687
|
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="""Train a masked language model on TPU.""")
parser.add_argument(
"""--pretrained_model_config""", type=__snake_case, default="""roberta-base""", help="""The model config to use. Note that we don't copy the model's weights, only the config!""", )
parser.add_argument(
"""--tokenizer""", type=__snake_case, default="""unigram-tokenizer-wikitext""", help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""", )
parser.add_argument(
"""--per_replica_batch_size""", type=__snake_case, default=8, help="""Batch size per TPU core.""", )
parser.add_argument(
"""--no_tpu""", action="""store_true""", help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""", )
parser.add_argument(
"""--tpu_name""", type=__snake_case, help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""", default="""local""", )
parser.add_argument(
"""--tpu_zone""", type=__snake_case, help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""", )
parser.add_argument(
"""--gcp_project""", type=__snake_case, help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""", action="""store_true""", help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""", )
parser.add_argument(
"""--train_dataset""", type=__snake_case, help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""", )
parser.add_argument(
"""--shuffle_buffer_size""", type=__snake_case, default=2**18, help="""Size of the shuffle buffer (in samples)""", )
parser.add_argument(
"""--eval_dataset""", type=__snake_case, help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""", )
parser.add_argument(
"""--num_epochs""", type=__snake_case, default=1, help="""Number of epochs to train for.""", )
parser.add_argument(
"""--learning_rate""", type=__snake_case, default=1E-4, help="""Learning rate to use for training.""", )
parser.add_argument(
"""--weight_decay_rate""", type=__snake_case, default=1E-3, help="""Weight decay rate to use for training.""", )
parser.add_argument(
"""--max_length""", type=__snake_case, default=512, help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""", )
parser.add_argument(
"""--mlm_probability""", type=__snake_case, default=0.15, help="""Fraction of tokens to mask during training.""", )
parser.add_argument("""--output_dir""", type=__snake_case, required=__snake_case, help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""", type=__snake_case, help="""Model ID to upload to on the Hugging Face Hub.""" )
    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu


def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)

    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
main(args)
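# Example invocation (added; the flags are the ones defined by parse_args above,
# the script name and bucket paths are hypothetical):
#   python train_mlm.py --tokenizer unigram-tokenizer-wikitext \
#       --pretrained_model_config roberta-base \
#       --train_dataset gs://my-bucket/train --eval_dataset gs://my-bucket/eval \
#       --output_dir ./mlm_model --bfloat16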
| 687
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 50
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 260
| 0
|
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of `function` via the secant method, starting from x0 and x1."""
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
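# For reference (added note): the real root of x**3 - 2*x - 5 is approximately
# 2.0945514815, so the printed value should agree to within the 1e-5 tolerance.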
| 591
|
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
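# Example invocation (added; the flags are the ones defined above, the script
# name and paths are hypothetical):
#   python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#       --xlm_checkpoint_path ./xlm_model.pth \
#       --pytorch_dump_folder_path ./xlm_pytorch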
| 591
| 1
|
import datasets
from .evaluate import evaluate
_CITATION = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
_DESCRIPTION = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
_KWARGS_DESCRIPTION = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    """simple docstring"""

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': {
'id': datasets.Value('string' ),
'prediction_text': datasets.features.Sequence(datasets.Value('string' ) ),
},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ) , codebase_urls=['https://www.atticusprojectai.org/cuad'] , reference_urls=['https://www.atticusprojectai.org/cuad'] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
{
'paragraphs': [
{
'qas': [
{
'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
'id': ref['id'],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 23
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
__lowerCAmelCase : str = R'\n    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n    with the format:\n\n    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n    Args:\n        questions (`str` or `List[str]`):\n            The questions to be encoded. You can specify one question for many passages. In this case, the question\n            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n            `titles` or `texts`.\n        titles (`str` or `List[str]`):\n            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n        texts (`str` or `List[str]`):\n            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n            Activates and controls padding. Accepts the following values:\n\n            - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n              if provided).\n            - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided.\n            - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n              lengths).\n        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n            Activates and controls truncation. Accepts the following values:\n\n            - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n              the maximum acceptable input length for the model if that argument is not provided. This will truncate\n              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n              of pairs) is provided.\n            - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the first\n              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the\n              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n              greater than the model maximum admissible input size).\n        max_length (`int`, *optional*):\n                Controls the maximum length to use by one of the truncation/padding parameters.\n\n                If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n                is required by one of the truncation/padding parameters. If the model has no specific maximum input\n                length (like XLNet) truncation/padding to a maximum length will be deactivated.\n        return_tensors (`str` or [`~utils.TensorType`], *optional*):\n                If set, will return tensors instead of list of python integers. Acceptable values are:\n\n                - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n                - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n                - `\'np\'`: Return Numpy `np.ndarray` objects.\n        return_attention_mask (`bool`, *optional*):\n            Whether or not to return the attention mask. If not set, will return the attention mask according to the\n            specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n    Return:\n        `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n        - `input_ids`: List of token ids to be fed to a model.\n        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n    '
@add_start_docstrings(_A )
class UpperCAmelCase_ :
'''simple docstring'''
def __call__( self : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Union[bool, str] = False , UpperCamelCase__ : Union[bool, str] = False , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Optional[bool] = None , **UpperCamelCase__ : List[str] , ) -> BatchEncoding:
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , return_tensors=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
elif titles is None or texts is None:
__magic_name__ = titles if texts is None else texts
return super().__call__(
UpperCamelCase__ , UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , return_tensors=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
__magic_name__ = titles if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) else [titles]
__magic_name__ = texts if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) else [texts]
__magic_name__ = len(UpperCamelCase__ )
__magic_name__ = questions if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) else [questions] * n_passages
assert len(UpperCamelCase__ ) == len(
            UpperCamelCase__ ), F'''There should be as many titles as texts but got {len(UpperCamelCase__ )} titles and {len(UpperCamelCase__ )} texts.'''
__magic_name__ = super().__call__(UpperCamelCase__ , UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ )["""input_ids"""]
__magic_name__ = super().__call__(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ )["""input_ids"""]
__magic_name__ = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(UpperCamelCase__ , UpperCamelCase__ )
]
}
if return_attention_mask is not False:
__magic_name__ = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
__magic_name__ = attention_mask
return self.pad(UpperCamelCase__ , padding=UpperCamelCase__ , max_length=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
def _lowercase ( self : List[Any] , UpperCamelCase__ : BatchEncoding , UpperCamelCase__ : DPRReaderOutput , UpperCamelCase__ : int = 16 , UpperCamelCase__ : int = 64 , UpperCamelCase__ : int = 4 , ) -> List[DPRSpanPrediction]:
"""simple docstring"""
__magic_name__ = reader_input["""input_ids"""]
__magic_name__ , __magic_name__ , __magic_name__ = reader_output[:3]
__magic_name__ = len(UpperCamelCase__ )
__magic_name__ = sorted(range(UpperCamelCase__ ) , reverse=UpperCamelCase__ , key=relevance_logits.__getitem__ )
__magic_name__ = []
for doc_id in sorted_docs:
__magic_name__ = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
__magic_name__ = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
__magic_name__ = sequence_ids.index(self.pad_token_id )
else:
__magic_name__ = len(UpperCamelCase__ )
__magic_name__ = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=UpperCamelCase__ , top_spans=UpperCamelCase__ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=UpperCamelCase__ , start_index=UpperCamelCase__ , end_index=UpperCamelCase__ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(UpperCamelCase__ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _lowercase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , ) -> List[DPRSpanPrediction]:
"""simple docstring"""
__magic_name__ = []
for start_index, start_score in enumerate(UpperCamelCase__ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        __magic_name__ = sorted(UpperCamelCase__ , key=lambda x : x[1] , reverse=UpperCamelCase__ )
__magic_name__ = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F'''Wrong span indices: [{start_index}:{end_index}]'''
__magic_name__ = end_index - start_index + 1
assert length <= max_answer_length, F'''Span is too long: {length} > {max_answer_length}'''
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(UpperCamelCase__ ) == top_spans:
break
return chosen_span_intervals
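# Hedged helper (added; not part of the original file): mirrors the containment
# test used by `_get_best_spans` above -- a candidate span is dropped if it
# contains, or is contained by, any span already chosen.
def _spans_nested(span, chosen_spans):
    start, end = span
    return any(
        start <= prev_start <= prev_end <= end or prev_start <= start <= end <= prev_end
        for prev_start, prev_end in chosen_spans
    )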
@add_end_docstrings(_A )
class UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
a__ = VOCAB_FILES_NAMES
a__ = READER_PRETRAINED_VOCAB_FILES_MAP
a__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = READER_PRETRAINED_INIT_CONFIGURATION
a__ = ["""input_ids""", """attention_mask"""]
a__ = DPRReaderTokenizer
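# Hedged usage sketch (added; not part of the original file): how the reader
# classes above are combined in practice. `DPRReader` and the keyword call
# signature are the public transformers API; the checkpoint name is only an example.
def _dpr_reader_demo():
    from transformers import DPRReader, DPRReaderTokenizerFast

    tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
    model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
    encoded = tokenizer(
        questions="What is love?",
        titles="Haddaway",
        texts="'What Is Love' is a song recorded by the artist Haddaway",
        return_tensors="pt",
    )
    outputs = model(**encoded)
    return tokenizer.decode_best_spans(encoded, outputs)  # List[DPRSpanPrediction]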
| 529
| 0
|
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
_lowerCAmelCase :Optional[int] = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def __lowerCAmelCase ( a_ , a_ ) -> List[Any]:
'''simple docstring'''
inspect_dataset(a_ , a_ )
SCREAMING_SNAKE_CASE : str = path + '.py'
assert script_name in os.listdir(a_ )
assert "__pycache__" not in os.listdir(a_ )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def __lowerCAmelCase ( a_ , a_ ) -> Tuple:
'''simple docstring'''
inspect_metric(a_ , a_ )
SCREAMING_SNAKE_CASE : List[str] = path + '.py'
assert script_name in os.listdir(a_ )
assert "__pycache__" not in os.listdir(a_ )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def __lowerCAmelCase ( a_ , a_ , a_ ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = get_dataset_config_info(a_ , config_name=a_ )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def __lowerCAmelCase ( a_ , a_ , a_ ) -> str:
'''simple docstring'''
with pytest.raises(a_ ):
get_dataset_config_info(a_ , config_name=a_ )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def __lowerCAmelCase ( a_ , a_ ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = get_dataset_config_names(a_ )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def __lowerCAmelCase ( a_ , a_ , a_ ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = get_dataset_infos(a_ )
assert list(infos.keys() ) == expected_configs
SCREAMING_SNAKE_CASE : Tuple = expected_configs[0]
assert expected_config in infos
SCREAMING_SNAKE_CASE : List[Any] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def __lowerCAmelCase ( a_ , a_ , a_ ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = get_dataset_infos(a_ )
assert expected_config in infos
SCREAMING_SNAKE_CASE : Union[str, Any] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def __lowerCAmelCase ( a_ , a_ , a_ ) -> List[str]:
'''simple docstring'''
with pytest.raises(a_ ):
get_dataset_split_names(a_ , config_name=a_ )
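# Hedged usage sketch (added; not part of the original test module): the direct
# call pattern the parametrized tests above exercise. Requires network access;
# 'squad' is just an example path.
if __name__ == "__main__":
    print(get_dataset_config_names('squad'))  # e.g. ['plain_text']
    print(get_dataset_split_names('squad', config_name='plain_text'))  # ['train', 'validation']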
| 179
|
'''simple docstring'''
import argparse
import struct
import unittest
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , lowercase__ ) -> None:
SCREAMING_SNAKE_CASE : List[str] = data
# Initialize hash values
SCREAMING_SNAKE_CASE : str = [
0X6A_09_E6_67,
0XBB_67_AE_85,
0X3C_6E_F3_72,
0XA5_4F_F5_3A,
0X51_0E_52_7F,
0X9B_05_68_8C,
0X1F_83_D9_AB,
0X5B_E0_CD_19,
]
# Initialize round constants
SCREAMING_SNAKE_CASE : List[Any] = [
0X42_8A_2F_98,
0X71_37_44_91,
0XB5_C0_FB_CF,
0XE9_B5_DB_A5,
0X39_56_C2_5B,
0X59_F1_11_F1,
0X92_3F_82_A4,
0XAB_1C_5E_D5,
0XD8_07_AA_98,
0X12_83_5B_01,
0X24_31_85_BE,
0X55_0C_7D_C3,
0X72_BE_5D_74,
0X80_DE_B1_FE,
0X9B_DC_06_A7,
0XC1_9B_F1_74,
0XE4_9B_69_C1,
0XEF_BE_47_86,
0X0F_C1_9D_C6,
0X24_0C_A1_CC,
0X2D_E9_2C_6F,
0X4A_74_84_AA,
0X5C_B0_A9_DC,
0X76_F9_88_DA,
0X98_3E_51_52,
0XA8_31_C6_6D,
0XB0_03_27_C8,
0XBF_59_7F_C7,
0XC6_E0_0B_F3,
0XD5_A7_91_47,
0X06_CA_63_51,
0X14_29_29_67,
0X27_B7_0A_85,
0X2E_1B_21_38,
0X4D_2C_6D_FC,
0X53_38_0D_13,
0X65_0A_73_54,
0X76_6A_0A_BB,
0X81_C2_C9_2E,
0X92_72_2C_85,
0XA2_BF_E8_A1,
0XA8_1A_66_4B,
0XC2_4B_8B_70,
0XC7_6C_51_A3,
0XD1_92_E8_19,
0XD6_99_06_24,
0XF4_0E_35_85,
0X10_6A_A0_70,
0X19_A4_C1_16,
0X1E_37_6C_08,
0X27_48_77_4C,
0X34_B0_BC_B5,
0X39_1C_0C_B3,
0X4E_D8_AA_4A,
0X5B_9C_CA_4F,
0X68_2E_6F_F3,
0X74_8F_82_EE,
0X78_A5_63_6F,
0X84_C8_78_14,
0X8C_C7_02_08,
0X90_BE_FF_FA,
0XA4_50_6C_EB,
0XBE_F9_A3_F7,
0XC6_71_78_F2,
]
SCREAMING_SNAKE_CASE : List[Any] = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def _UpperCamelCase ( lowercase__ ) -> bytes:
SCREAMING_SNAKE_CASE : str = B'\x80' + (B'\x00' * (63 - (len(lowercase__ ) + 8) % 64))
SCREAMING_SNAKE_CASE : Union[str, Any] = struct.pack('>Q' , (len(lowercase__ ) * 8) )
return data + padding + big_endian_integer
def _UpperCamelCase ( self ) -> None:
# Convert into blocks of 64 bytes
SCREAMING_SNAKE_CASE : Dict = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
SCREAMING_SNAKE_CASE : Union[str, Any] = list(struct.unpack('>16L' , lowercase__ ) )
            # extend with 48 zero-valued words (the rest of the 64-entry message schedule)
words += [0] * 48
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
SCREAMING_SNAKE_CASE : Dict = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
SCREAMING_SNAKE_CASE : Optional[int] = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
SCREAMING_SNAKE_CASE : Tuple = (
words[index - 16] + sa + words[index - 7] + sa
) % 0X1_00_00_00_00
# Compression
SCREAMING_SNAKE_CASE : Any = self.ror(lowercase__ , 6 ) ^ self.ror(lowercase__ , 11 ) ^ self.ror(lowercase__ , 25 )
SCREAMING_SNAKE_CASE : Any = (e & f) ^ ((~e & 0XFF_FF_FF_FF) & g)
SCREAMING_SNAKE_CASE : str = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X1_00_00_00_00
SCREAMING_SNAKE_CASE : Any = self.ror(lowercase__ , 2 ) ^ self.ror(lowercase__ , 13 ) ^ self.ror(lowercase__ , 22 )
SCREAMING_SNAKE_CASE : str = (a & b) ^ (a & c) ^ (b & c)
SCREAMING_SNAKE_CASE : Tuple = (sa + maj) % 0X1_00_00_00_00
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = (
g,
f,
e,
((d + tempa) % 0X1_00_00_00_00),
c,
b,
a,
                ((tempa + tempa) % 0X1_00_00_00_00),  # temp1 + temp2 in the SHA-256 spec; the two temporaries were collapsed to one name by renaming
)
SCREAMING_SNAKE_CASE : Union[str, Any] = [a, b, c, d, e, f, g, h]
# Modify final values
SCREAMING_SNAKE_CASE : Tuple = [
((element + mutated_hash_values[index]) % 0X1_00_00_00_00)
for index, element in enumerate(self.hashes )
]
SCREAMING_SNAKE_CASE : Tuple = ''.join([hex(lowercase__ )[2:].zfill(8 ) for value in self.hashes] )
def _UpperCamelCase ( self , lowercase__ , lowercase__ ) -> int:
return 0XFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)
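# Hedged sketch (added; not in the original module): quick parity check between the
# class above and hashlib. `SHAaaa` is the name the rest of the module uses for the
# renamed SHA-256 class, and `.hash` holds the hex digest, as in the test below.
def _sha256_parity_demo(message: bytes = b"abc") -> bool:
    import hashlib

    return SHAaaa(message).hash == hashlib.sha256(message).hexdigest()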
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ) -> None:
import hashlib
SCREAMING_SNAKE_CASE : Optional[Any] = bytes('Test String' , 'utf-8' )
        self.assertEqual(SHAaaa(lowercase__ ).hash , hashlib.sha256(lowercase__ ).hexdigest() )
def __lowerCAmelCase ( ) -> None:
'''simple docstring'''
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
SCREAMING_SNAKE_CASE : List[Any] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
SCREAMING_SNAKE_CASE : List[Any] = f.read()
else:
SCREAMING_SNAKE_CASE : Any = bytes(a_ , 'utf-8' )
print(SHAaaa(a_ ).hash )
if __name__ == "__main__":
    __lowerCAmelCase()
| 179
| 1
|
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class __a (enum.Enum):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[Any] = 0
_SCREAMING_SNAKE_CASE :List[Any] = 1
_SCREAMING_SNAKE_CASE :Dict = 2
@add_end_docstrings(UpperCamelCase_)
class __a (UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[Any] = """
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
"""
def __init__( self , *_a , **_a ) -> Tuple:
"""simple docstring"""
super().__init__(*_a , **_a )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
SCREAMING_SNAKE_CASE__ : Any = None
if self.model.config.prefix is not None:
SCREAMING_SNAKE_CASE__ : List[str] = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self._sanitize_parameters(prefix=_a , **self._forward_params )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {**self._preprocess_params, **preprocess_params}
SCREAMING_SNAKE_CASE__ : Optional[Any] = {**self._forward_params, **forward_params}
def _a ( self , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , _a=None , **_a , ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = {}
if prefix is not None:
SCREAMING_SNAKE_CASE__ : Dict = prefix
if prefix:
SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer(
_a , padding=_a , add_special_tokens=_a , return_tensors=self.framework )
SCREAMING_SNAKE_CASE__ : Tuple = prefix_inputs["""input_ids"""].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f'''{handle_long_generation} is not a valid value for the `handle_long_generation` parameter; expected'''
                    """ one of [None, 'hole']""" )
SCREAMING_SNAKE_CASE__ : int = handle_long_generation
preprocess_params.update(_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = generate_kwargs
SCREAMING_SNAKE_CASE__ : int = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_full_text`""" )
if return_tensors is not None:
raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""" )
SCREAMING_SNAKE_CASE__ : List[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""" )
SCREAMING_SNAKE_CASE__ : Tuple = ReturnType.TENSORS
if return_type is not None:
SCREAMING_SNAKE_CASE__ : int = return_type
if clean_up_tokenization_spaces is not None:
SCREAMING_SNAKE_CASE__ : List[str] = clean_up_tokenization_spaces
if stop_sequence is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer.encode(_a , add_special_tokens=_a )
if len(_a ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
SCREAMING_SNAKE_CASE__ : List[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _a ( self , *_a , **_a ) -> Any:
"""simple docstring"""
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"""add_space_before_punct_symbol""": True} )
return super()._parse_and_tokenize(*_a , **_a )
def __call__( self , _a , **_a ) -> Optional[int]:
"""simple docstring"""
return super().__call__(_a , **_a )
def _a ( self , _a , _a="" , _a=None , **_a ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer(
prefix + prompt_text , padding=_a , add_special_tokens=_a , return_tensors=self.framework )
SCREAMING_SNAKE_CASE__ : Tuple = prompt_text
if handle_long_generation == "hole":
SCREAMING_SNAKE_CASE__ : List[Any] = inputs["""input_ids"""].shape[-1]
if "max_new_tokens" in generate_kwargs:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = generate_kwargs["""max_new_tokens"""]
else:
SCREAMING_SNAKE_CASE__ : Tuple = generate_kwargs.get("""max_length""" , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError("""We cannot infer how many new tokens are expected""" )
if cur_len + new_tokens > self.tokenizer.model_max_length:
SCREAMING_SNAKE_CASE__ : str = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"""We cannot use `hole` to handle this generation the number of desired tokens exceeds the"""
""" models max length""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs["""input_ids"""][:, -keep_length:]
if "attention_mask" in inputs:
SCREAMING_SNAKE_CASE__ : Optional[int] = inputs["""attention_mask"""][:, -keep_length:]
return inputs
def _a ( self , _a , **_a ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_inputs["""input_ids"""]
SCREAMING_SNAKE_CASE__ : Optional[int] = model_inputs.get("""attention_mask""" , _a )
# Allow empty prompts
if input_ids.shape[1] == 0:
SCREAMING_SNAKE_CASE__ : List[str] = None
SCREAMING_SNAKE_CASE__ : List[Any] = None
SCREAMING_SNAKE_CASE__ : List[str] = 1
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_ids.shape[0]
SCREAMING_SNAKE_CASE__ : Tuple = model_inputs.pop("""prompt_text""" )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
SCREAMING_SNAKE_CASE__ : Optional[int] = generate_kwargs.pop("""prefix_length""" , 0 )
if prefix_length > 0:
SCREAMING_SNAKE_CASE__ : List[str] = """max_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].max_new_tokens is not None
)
if not has_max_new_tokens:
SCREAMING_SNAKE_CASE__ : int = generate_kwargs.get("""max_length""" ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
SCREAMING_SNAKE_CASE__ : Dict = """min_new_tokens""" in generate_kwargs or (
"""generation_config""" in generate_kwargs
and generate_kwargs["""generation_config"""].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
SCREAMING_SNAKE_CASE__ : Tuple = self.model.generate(input_ids=_a , attention_mask=_a , **_a )
SCREAMING_SNAKE_CASE__ : Optional[int] = generated_sequence.shape[0]
if self.framework == "pt":
SCREAMING_SNAKE_CASE__ : str = generated_sequence.reshape(_a , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.reshape(_a , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def _a ( self , _a , _a=ReturnType.FULL_TEXT , _a=True ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = model_outputs["""generated_sequence"""][0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_outputs["""input_ids"""]
SCREAMING_SNAKE_CASE__ : str = model_outputs["""prompt_text"""]
SCREAMING_SNAKE_CASE__ : Any = generated_sequence.numpy().tolist()
SCREAMING_SNAKE_CASE__ : List[Any] = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
SCREAMING_SNAKE_CASE__ : Tuple = {"""generated_token_ids""": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer.decode(
_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
SCREAMING_SNAKE_CASE__ : Dict = 0
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , ) )
if return_type == ReturnType.FULL_TEXT:
SCREAMING_SNAKE_CASE__ : Tuple = prompt_text + text[prompt_length:]
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = text[prompt_length:]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""generated_text""": all_text}
records.append(_a )
return records
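# Hedged usage sketch (added; not part of the original file): the pipeline class
# above is normally reached through the `pipeline` factory. The factory call and
# kwargs shown are the public transformers API; the model name is only an example.
if __name__ == "__main__":
    from transformers import pipeline

    generator = pipeline("text-generation", model="gpt2")
    print(generator("Hello, my dog", max_new_tokens=10, return_full_text=True))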
| 680
|
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
a :str = 637_8137.0
a :Optional[Any] = 635_6752.31_4245
a :List[Any] = 6_378_137
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> float:
SCREAMING_SNAKE_CASE__ : Dict = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
SCREAMING_SNAKE_CASE__ : Dict = atan((1 - flattening) * tan(radians(__lowerCAmelCase ) ) )
SCREAMING_SNAKE_CASE__ : Dict = atan((1 - flattening) * tan(radians(__lowerCAmelCase ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
SCREAMING_SNAKE_CASE__ : Tuple = haversine_distance(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
    SCREAMING_SNAKE_CASE__ : List[str] = (b_lata + b_lata) / 2  # (b_lat1 + b_lat2) / 2 in the source; the subscripts were collapsed by renaming
    SCREAMING_SNAKE_CASE__ : Dict = (b_lata - b_lata) / 2  # (b_lat1 - b_lat2) / 2 in the source
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
SCREAMING_SNAKE_CASE__ : Tuple = (sin(__lowerCAmelCase ) ** 2) * (cos(__lowerCAmelCase ) ** 2)
SCREAMING_SNAKE_CASE__ : str = cos(sigma / 2 ) ** 2
    SCREAMING_SNAKE_CASE__ : List[str] = (sigma - sin(__lowerCAmelCase )) * (x_numerator / x_denominator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
SCREAMING_SNAKE_CASE__ : int = (cos(__lowerCAmelCase ) ** 2) * (sin(__lowerCAmelCase ) ** 2)
SCREAMING_SNAKE_CASE__ : int = sin(sigma / 2 ) ** 2
SCREAMING_SNAKE_CASE__ : Optional[Any] = (sigma + sin(__lowerCAmelCase )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
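def _lambert_demo() -> float:
    """Added hedged example (not in the original): San Francisco -> Yosemite,
    roughly 254 km. Coordinates are approximate, and `_lowercase` is the renamed
    Lambert-distance function defined above."""
    return _lowercase(37.774856, -122.424227, 37.864742, -119.537521)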
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
A__ : Optional[int] = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Any = ['''DeiTFeatureExtractor''']
A__ : List[Any] = ['''DeiTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Union[str, Any] = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[int] = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
A__ : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
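# Hedged sketch (added; not part of the original __init__): the `_LazyModule` used
# above defers submodule imports until attribute access, roughly like this minimal
# stand-in.
class _TinyLazyModule:
    def __init__(self, import_structure):
        self._import_structure = import_structure  # e.g. {'package.mod': ['Name']}

    def __getattr__(self, name):
        import importlib

        for module_name, exported in self._import_structure.items():
            if name in exported:
                return getattr(importlib.import_module(module_name), name)
        raise AttributeError(name)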
| 124
|
'''simple docstring'''
import random
from .binary_exp_mod import bin_exp_mod
def a_ ( n : int ,prec : int = 10_00 ) -> bool:
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
__snake_case : Optional[int] = n - 1
__snake_case : Any = 0
while d % 2 == 0:
d /= 2
exp += 1
# n - 1=d*(2**exp)
__snake_case : Tuple = 0
while count < prec:
__snake_case : Union[str, Any] = random.randint(2 ,n - 1 )
        __snake_case : Optional[int] = bin_exp_mod(a ,d ,n )
if b != 1:
__snake_case : Dict = True
            for _ in range(exp ):
if b == n - 1:
__snake_case : Any = False
break
__snake_case : Optional[Any] = b * b
b %= n
if flag:
return False
count += 1
return True
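def _primality_demo() -> None:
    """Added hedged sanity check (not in the original): Carmichael numbers such as
    561 fool plain Fermat tests but are rejected by the Miller-Rabin style loop in
    `a_` above; 7919 is the 1000th prime."""
    for candidate, expected in [(2, True), (561, False), (7_919, True)]:
        assert a_(candidate) == expected, candidate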
if __name__ == "__main__":
A__ : Tuple = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
    print(''', '''.join(str(i) for i in range(n + 1) if a_(i)))
| 124
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase_ : Any = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class __lowerCAmelCase ( __a ):
snake_case : Optional[int] = """visual_bert"""
def __init__(self , lowerCAmelCase__=3_0_5_2_2 , lowerCAmelCase__=7_6_8 , lowerCAmelCase__=5_1_2 , lowerCAmelCase__=1_2 , lowerCAmelCase__=1_2 , lowerCAmelCase__=3_0_7_2 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_1_2 , lowerCAmelCase__=2 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=1e-12 , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=2 , **lowerCAmelCase__ , ):
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCAmelCase : List[Any] = vocab_size
_UpperCAmelCase : int = max_position_embeddings
_UpperCAmelCase : Tuple = hidden_size
_UpperCAmelCase : Tuple = visual_embedding_dim
_UpperCAmelCase : int = num_hidden_layers
_UpperCAmelCase : Optional[Any] = num_attention_heads
_UpperCAmelCase : str = intermediate_size
_UpperCAmelCase : Tuple = hidden_act
_UpperCAmelCase : Optional[int] = hidden_dropout_prob
_UpperCAmelCase : Tuple = attention_probs_dropout_prob
_UpperCAmelCase : Optional[int] = initializer_range
_UpperCAmelCase : Tuple = type_vocab_size
_UpperCAmelCase : Optional[int] = layer_norm_eps
_UpperCAmelCase : Dict = bypass_transformer
_UpperCAmelCase : Union[str, Any] = special_visual_initialize
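# Hedged usage sketch (added; not in the original file): the class above is the
# renamed transformers.VisualBertConfig, so a typical instantiation looks like this.
def _visual_bert_config_demo() -> int:
    from transformers import VisualBertConfig

    config = VisualBertConfig(visual_embedding_dim=1_024)
    return config.visual_embedding_dim  # 1024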
| 414
|
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class __lowerCAmelCase ( __a ):
snake_case : Union[List[PIL.Image.Image], np.ndarray]
snake_case : Optional[List[bool]]
snake_case : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 414
| 1
|
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class __A :
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=9_9 , __lowerCAmelCase=3_2 , __lowerCAmelCase=5 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=5_1_2 , __lowerCAmelCase=1_6 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , ):
'''simple docstring'''
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_input_mask
lowerCamelCase__ = use_token_type_ids
lowerCamelCase__ = use_labels
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = type_sequence_label_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = num_labels
lowerCamelCase__ = num_choices
lowerCamelCase__ = scope
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ = None
if self.use_token_type_ids:
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = None
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self ):
'''simple docstring'''
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = BioGptModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )
lowerCamelCase__ = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
'''simple docstring'''
lowerCamelCase__ = BioGptForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , *__lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = BioGptModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# create attention mask
lowerCamelCase__ = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCamelCase_ )
lowerCamelCase__ = self.seq_length // 2
lowerCamelCase__ = 0
# first forward pass
lowerCamelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ ).to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCamelCase__ = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
lowerCamelCase__ = ids_tensor((1,) , lowerCamelCase_ ).item() + 1
lowerCamelCase__ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
lowerCamelCase__ = random_other_next_tokens
# append to next input_ids and attn_mask
lowerCamelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase__ = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowerCamelCase_ )] , dim=1 , )
# get two different outputs
lowerCamelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )["""last_hidden_state"""]
lowerCamelCase__ = model(lowerCamelCase_ , past_key_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ )["""last_hidden_state"""]
# select random slice
lowerCamelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase__ = output_from_no_past[:, -1, random_slice_idx].detach()
lowerCamelCase__ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , *__lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = BioGptModel(config=lowerCamelCase_ ).to(lowerCamelCase_ ).eval()
lowerCamelCase__ = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCamelCase_ )
# first forward pass
lowerCamelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , use_cache=lowerCamelCase_ )
lowerCamelCase__ = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
lowerCamelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCamelCase__ = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
lowerCamelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase__ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
lowerCamelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )["""last_hidden_state"""]
lowerCamelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ )[
"""last_hidden_state"""
]
# select random slice
lowerCamelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase__ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , *__lowerCAmelCase , __lowerCAmelCase=False ):
'''simple docstring'''
lowerCamelCase__ = BioGptForCausalLM(lowerCamelCase_ )
model.to(lowerCamelCase_ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
lowerCamelCase__ = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def __lowerCamelCase ( self , __lowerCAmelCase , *__lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = BioGptModel(lowerCamelCase_ )
lowerCamelCase__ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , *__lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = self.num_labels
lowerCamelCase__ = BioGptForTokenClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.prepare_config_and_inputs()
(
lowerCamelCase__
) = config_and_inputs
lowerCamelCase__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __A ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (BioGptForCausalLM,) if is_torch_available() else ()
lowerCAmelCase_ = (
{
"""feature-extraction""": BioGptModel,
"""text-classification""": BioGptForSequenceClassification,
"""text-generation""": BioGptForCausalLM,
"""token-classification""": BioGptForTokenClassification,
"""zero-shot""": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = BioGptModelTester(self )
lowerCamelCase__ = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=3_7 )
def __lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase__ = type
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowerCamelCase_ )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*lowerCamelCase_ , gradient_checkpointing=lowerCamelCase_ )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowerCamelCase_ )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*lowerCamelCase_ )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*lowerCamelCase_ )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(lowerCamelCase_ )
lowerCamelCase__ = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
lowerCamelCase__ = """left"""
# Define PAD Token = EOS Token = 50256
lowerCamelCase__ = tokenizer.eos_token
lowerCamelCase__ = model.config.eos_token_id
# use different length sentences to test batching
lowerCamelCase__ = [
"""Hello, my dog is a little""",
"""Today, I""",
]
lowerCamelCase__ = tokenizer(lowerCamelCase_ , return_tensors='''pt''' , padding=lowerCamelCase_ )
lowerCamelCase__ = inputs["""input_ids"""].to(lowerCamelCase_ )
lowerCamelCase__ = model.generate(
input_ids=lowerCamelCase_ , attention_mask=inputs['''attention_mask'''].to(lowerCamelCase_ ) , )
lowerCamelCase__ = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(lowerCamelCase_ )
lowerCamelCase__ = model.generate(input_ids=lowerCamelCase_ )
lowerCamelCase__ = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item()
lowerCamelCase__ = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(lowerCamelCase_ )
lowerCamelCase__ = model.generate(input_ids=lowerCamelCase_ , max_length=model.config.max_length - num_paddings )
lowerCamelCase__ = tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
lowerCamelCase__ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCamelCase_ )
lowerCamelCase__ = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCamelCase_ )
lowerCamelCase__ = [
"""Hello, my dog is a little bit bigger than a little bit.""",
"""Today, I have a good idea of how to use the information""",
]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , [non_padded_sentence, padded_sentence] )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ = BioGptModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = 3
lowerCamelCase__ = input_dict["""input_ids"""]
lowerCamelCase__ = input_ids.ne(1 ).to(lowerCamelCase_ )
lowerCamelCase__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCamelCase__ = BioGptForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ = 3
lowerCamelCase__ = """multi_label_classification"""
lowerCamelCase__ = input_dict["""input_ids"""]
lowerCamelCase__ = input_ids.ne(1 ).to(lowerCamelCase_ )
lowerCamelCase__ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCamelCase__ = BioGptForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
lowerCamelCase__ = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]] )
lowerCamelCase__ = model(lowerCamelCase_ )[0]
lowerCamelCase__ = 4_2_3_8_4
lowerCamelCase__ = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , lowerCamelCase_ )
lowerCamelCase__ = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase_ , atol=1E-4 ) )
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
lowerCamelCase__ = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(lowerCamelCase_ )
torch.manual_seed(0 )
lowerCamelCase__ = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(lowerCamelCase_ )
lowerCamelCase__ = model.generate(
**lowerCamelCase_ , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=lowerCamelCase_ , )
lowerCamelCase__ = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCamelCase_ )
lowerCamelCase__ = (
"""COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"""
""" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"""
""" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"""
""" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"""
""" more than 800,000 deaths."""
)
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
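# Hedged sketch (added; not part of the original tests): the left-padding setup the
# batched-generation test above relies on, shown in isolation. Decoder-only models
# continue from the last position, so padding must sit on the left for batched prompts.
def _left_padding_setup(tokenizer, model):
    tokenizer.padding_side = "left"
    tokenizer.pad_token = tokenizer.eos_token
    model.config.pad_token_id = model.config.eos_token_id
    return tokenizer, model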
| 712
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
_DESCRIPTION = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn	Type	Description\n1	Document ID	This is a variation on the document filename\n2	Part number	Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3	Word number\n4	Word itself	This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5	Part-of-Speech\n6	Parse bit	This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7	Predicate lemma	The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8	Predicate Frameset ID	This is the PropBank frameset ID of the predicate in Column 7.\n9	Word sense	This is the word sense of the word in Column 3.\n10	Speaker/Author	This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11	Named Entities	These columns identifies the spans representing various named entities.\n12:N	Predicate Arguments	There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN	Coreference	Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
_KWARGS_DESCRIPTION = "\nCalculates coreference evaluation metrics.\nArgs:\n    predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n        Each prediction is a word with its annotations as a string made of columns joined with spaces.\n        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n        See the details on the format in the description of the metric.\n    references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n        Each reference is a word with its annotations as a string made of columns joined with spaces.\n        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n        See the details on the format in the description of the metric.\n    keep_singletons: After extracting all mentions of key or system files,\n        mentions whose corresponding coreference chain is of size one,\n        are considered as singletons. The default evaluation mode will include\n        singletons in evaluations if they are included in the key or the system files.\n        By setting 'keep_singletons=False', all singletons in the key and system files\n        will be excluded from the evaluation.\n    NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n        leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n    min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n        Minimum spans are determined using the MINA algorithm.\n\nReturns:\n    'mentions': mentions\n    'muc': MUC metric [Vilain et al, 1995]\n    'bcub': B-cubed [Bagga and Baldwin, 1998]\n    'ceafe': CEAFe [Luo et al., 2005]\n    'lea': LEA [Moosavi and Strube, 2016]\n    'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n    >>> coval = datasets.load_metric('coval')\n    >>> words = ['bc/cctv/00/cctv_0005   0   0       Thank   VBP  (TOP(S(VP*    thank  01   1    Xu_li  *           (V*)        *       -',\n    ... 'bc/cctv/00/cctv_0005   0   1         you   PRP        (NP*)      -    -   -    Xu_li  *        (ARG1*)   (ARG0*)   (116)',\n    ... 'bc/cctv/00/cctv_0005   0   2    everyone    NN        (NP*)      -    -   -    Xu_li  *    (ARGM-DIS*)        *    (116)',\n    ... 'bc/cctv/00/cctv_0005   0   3         for    IN        (PP*       -    -   -    Xu_li  *         (ARG2*         *       -',\n    ... 'bc/cctv/00/cctv_0005   0   4    watching   VBG   (S(VP*))))   watch  01   1    Xu_li  *             *)      (V*)       -',\n    ... 'bc/cctv/00/cctv_0005   0   5           .     .          *))      -    -   -    Xu_li  *              *         *       -']\n    >>> references = [words]\n    >>> predictions = [words]\n    >>> results = coval.compute(predictions=predictions, references=references)\n    >>> print(results) # doctest:+ELLIPSIS\n    {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
| 29
| 0
|
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
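# Minimal usage sketch (illustrative, not part of this module; assumes the
# `transformers` package is installed and the checkpoint below is available):
#
#     from transformers import pipeline
#
#     extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#     features = extractor("Hello world")  # nested list: [batch, tokens, hidden]
#     print(len(features[0]), len(features[0][0]))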
| 467
|
'''simple docstring'''
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """
    Greedy fractional knapsack: maximize the total value for capacity ``w``.

    >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
    240.0
    """
    # Sort items by value-to-weight ratio, best first.
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
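    # Worked example: sorted by value/weight ratio the first two items fit whole
    # (value 160, weight 30) and 20/30 of the third item tops up the capacity of
    # 50, giving 160 + 20 * 120 / 30 = 240.0.
    print(frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3))  # 240.0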
| 288
| 0
|
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """
    Interpolate and evaluate a polynomial at ``x0`` using Neville's method.
    Returns the approximated value and the full table of intermediate values.

    >>> neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0]
    10.0
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
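    # Illustrative check on linear data y = x + 5: interpolating through the five
    # points below and evaluating at x0 = 5 recovers 10.0 exactly, since Neville's
    # recurrence reproduces any polynomial through the given points.
    print(neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0])  # 10.0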
| 719
|
'''simple docstring'''
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
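# Minimal shape-flow sketch (the config values below are illustrative
# assumptions, not taken from any released checkpoint):
#
#     from transformers import CLIPVisionConfig
#
#     config = CLIPVisionConfig(hidden_size=768, num_hidden_layers=9)
#     encoder = PaintByExampleImageEncoder(config, proj_size=768)
#     pixel_values = torch.randn(1, 3, config.image_size, config.image_size)
#     cond = encoder(pixel_values)  # (1, 1, proj_size) conditioning embedding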
| 609
| 0
|
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
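# Minimal encoding sketch (mirrors the behaviour implemented above; the
# language codes and sentences are illustrative):
#
#     feature = TranslationVariableLanguages(languages=["en", "fr", "de"])
#     feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
#     # -> {"language": ("en", "fr", "fr"),
#     #     "translation": ("the cat", "la chatte", "le chat")}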
| 302
|
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
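        # Design note: `predict_with_generate=True` makes the Seq2SeqTrainer call
        # `model.generate()` during evaluation, so `_compute_metrics` receives
        # decoded token ids rather than raw logits -- that is what lets the
        # exact-match "accuracy" above compare generated and reference summaries.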
| 302
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast


@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ["pixel_values", "input_ids", "attention_mask"]
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
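# End-to-end sketch of what these tests exercise (illustrative; the checkpoint
# name is an assumption about what is available on the Hub):
#
#     from transformers import Blip2Processor
#
#     processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#     inputs = processor(images=image, text="a photo of", return_tensors="pt")
#     # -> dict with "pixel_values", "input_ids", "attention_mask"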
| 576
|
from functools import reduce
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    """
    Find the thirteen adjacent digits in the 1000-digit number ``n`` that have
    the greatest product and return that product.
    """
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(F"{solution() = }")
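    # Illustrative mini-case of the same sliding-window idea: for "12345" with a
    # window of 3 the products are 1*2*3=6, 2*3*4=24 and 3*4*5=60, so the answer
    # would be 60; `solution()` does the same with a 13-digit window over N.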
| 576
| 1
|
'''simple docstring'''
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    """Rescale ``data`` to the [0, 1] range (min-max normalization)."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Rescale ``data`` to zero mean and unit variance (z-score)."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
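# Example values (easy to verify by hand):
#     normalization([2, 4, 6])    -> [0.0, 0.5, 1.0]
#     standardization([2, 4, 6])  -> [-1.0, 0.0, 1.0]   (sample stdev of [2, 4, 6] is 2)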
| 127
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
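# Minimal usage sketch (the layer counts are illustrative overrides):
#
#     config = LxmertConfig(l_layers=3, x_layers=2, r_layers=2)
#     config.num_hidden_layers  # {"vision": 2, "cross_encoder": 2, "language": 3}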
| 127
| 1
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    BlipImageProcessor,
    InstructBlipConfig,
    InstructBlipForConditionalGeneration,
    InstructBlipProcessor,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    LlamaConfig,
    LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name):
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Copy/paste/tweak the original model's weights to the Transformers design.
    """
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})

    if "t5" in model_name:
        tokenizer = T5TokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>"
        )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})

    config, image_size = get_blip2_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }

    name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    hf_model_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    lavis_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=lavis_device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)

    image = load_demo_image()
    prompt = "What is unusual about this image?"

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = InstructBlipProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)

    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}
            ).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits

    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")

    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)

    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "instructblip-vicuna-7b",
        "instructblip-vicuna-13b",
        "instructblip-flan-t5-xl",
        "instructblip-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="instructblip-flan-t5-xl",
        choices=choices,
        type=str,
        help="Name of the model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()
    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
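    # Example invocation (illustrative; the script filename and output folder are
    # assumptions, and the LAVIS dependencies noted at the top must be installed):
    #
    #     python convert_instructblip_original_to_pytorch.py \
    #         --model_name instructblip-flan-t5-xl \
    #         --pytorch_dump_folder_path ./instructblip-flan-t5-xl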
| 715
|
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : int = returncode
__snake_case : Tuple = stdout
__snake_case : List[Any] = stderr
async def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] ):
while True:
__snake_case : Optional[int] = await stream.readline()
if line:
callback(__UpperCAmelCase )
else:
break
async def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict=None , __UpperCAmelCase : int=None , __UpperCAmelCase : str=None , __UpperCAmelCase : Optional[int]=False , __UpperCAmelCase : int=False ):
if echo:
print('\nRunning: ' , ' '.join(__UpperCAmelCase ) )
__snake_case : Tuple = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__UpperCAmelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__snake_case : Any = []
__snake_case : Tuple = []
def tee(__UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any]="" ):
__snake_case : int = line.decode('utf-8' ).rstrip()
sink.append(__UpperCAmelCase )
if not quiet:
print(__UpperCAmelCase , __UpperCAmelCase , file=__UpperCAmelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stdout , label='stdout:' ) ),
_read_stream(p.stderr , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stderr , label='stderr:' ) ),
] , timeout=__UpperCAmelCase , )
return _RunOutput(await p.wait() , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : List[str]=1_80 , __UpperCAmelCase : Any=False , __UpperCAmelCase : int=True ):
__snake_case : Any = asyncio.get_event_loop()
__snake_case : List[str] = loop.run_until_complete(
_stream_subprocess(__UpperCAmelCase , env=__UpperCAmelCase , stdin=__UpperCAmelCase , timeout=__UpperCAmelCase , quiet=__UpperCAmelCase , echo=__UpperCAmelCase ) )
__snake_case : Dict = ' '.join(__UpperCAmelCase )
if result.returncode > 0:
__snake_case : List[Any] = '\n'.join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
return result
def UpperCAmelCase__( ):
__snake_case : List[str] = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' )
__snake_case : Optional[Any] = re.sub(r'^gw' , '' , __UpperCAmelCase , 0 , re.M )
return int(__UpperCAmelCase )
def get_torch_dist_unique_port():
    """
    Returns a port unique to this particular pytest-xdist worker, so that concurrent
    test suites do not race for the same port.
    """
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
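# A minimal usage sketch (illustrative, not part of the original module). It assumes a
# pytest-style test that launches a worker script on a per-xdist-worker port; the
# script name below is a placeholder.
#
#     port = get_torch_dist_unique_port()
#     cmd = f"python -m torch.distributed.run --nproc_per_node=2 --master_port={port} run_my_test.py".split()
#     result = execute_subprocess_async(cmd, env=os.environ.copy())
#     combined_stdout = "\n".join(result.stdout)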
| 679
| 0
|
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir,
            keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs,
        )
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode,
                verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs
    def write(self) -> int:
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead.")
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        return written
    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)
    def _write(self, file_obj, orient, lines, index, **to_json_kwargs) -> int:
        """Writes the pyarrow table as JSON lines to a binary file handle. Caller is responsible for opening and closing the handle."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
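# Minimal round-trip sketch for the reader/writer above (illustrative, not part of
# the original module):
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"col_1": [0, 1], "col_2": ["a", "b"]})
#     JsonDatasetWriter(ds, "data.jsonl", lines=True).write()
#     reloaded = JsonDatasetReader("data.jsonl").read()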
| 0
|
from math import sqrt
def is_prime(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n):
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    # begin_list: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.
    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n):
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number):
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number):
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number):
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare must been from type bool"
    return number % 2 == 0


def is_odd(number):
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare must been from type bool"
    return number % 2 != 0


def goldbach(number):
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def gcd(number1, number2):
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1


def kg_v(number1, number2):
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # captured numbers in both 'prime_fac_1' and 'prime_fac_2'
    # iterates through prime_fac_1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through prime_fac_2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans


def get_prime(n):
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_1, p_number_2):
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'p_number_1' < 'p_number_2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'p_number_1' and 'p_number_2' !
    return ans


def get_divisors(n):
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function get_divisors(...)"
    return ans


def is_perfect_number(number):
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function get_divisors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator, denominator):
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n):
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n):
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib_1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp
    return ans
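# Quick sanity checks for the helpers above (illustrative; each is easy to verify by hand):
#
#     assert is_prime(13) and not is_prime(12)
#     assert prime_factorization(60) == [2, 2, 3, 5]
#     assert gcd(24, 36) == 12 and kg_v(4, 6) == 12
#     assert goldbach(28) == [5, 23]
#     assert fib(5) == 8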
| 622
| 0
|
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, orginal_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(orginal_shape)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape)
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape)
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape)
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape)
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape)
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape)

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape)
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape)

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output
        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")
        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform
    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")
    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")
    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow Token Dropping checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model.',
)
    args = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
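# Illustrative invocation (all paths and the script name below are placeholders, not
# taken from the original file):
#
#     python convert_token_dropping_checkpoint.py \
#         --tf_checkpoint_path /path/to/tf_checkpoint \
#         --bert_config_file /path/to/bert_config.json \
#         --pytorch_dump_path /path/to/output_dir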
| 59
|
from math import pow, sqrt


def validate(*values: float) -> bool:
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1, molar_mass_2):
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate, molar_mass_1, molar_mass_2):
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate, molar_mass_1, molar_mass_2):
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass, effusion_rate_1, effusion_rate_2):
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass, effusion_rate_1, effusion_rate_2):
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
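# Worked example of Graham's law (illustrative, not from the original file): with molar
# masses 1 and 4, the lighter gas effuses twice as fast, since sqrt(4 / 1) = 2.
#
#     assert effusion_ratio(1, 4) == 2.0
#     assert first_effusion_rate(3.0, 1, 4) == 6.0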
| 59
| 1
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
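# Minimal usage sketch (illustrative; the unet/scheduler pair is an assumption — any
# small UNet2DModel plus a compatible scheduler should work):
#
#     from diffusers import DDPMScheduler, UNet2DModel
#     unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
#     pipe = CustomLocalPipeline(unet=unet, scheduler=DDPMScheduler())
#     output, message = pipe(batch_size=1, num_inference_steps=2)  # message == "This is a local test"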
| 361
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 361
| 1
|
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Returns the prime factors of n as a list."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
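# Examples (illustrative): prime_factors(100) == [2, 2, 5, 5] and prime_factors(97) == [97].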
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711
|
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number))
if __name__ == "__main__":
print(solution())
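# Spot check (illustrative): 4150 = 4**5 + 1**5 + 5**5 + 0**5, so it is one of the
# numbers counted by solution().
#
#     assert digits_fifth_powers_sum(4150) == 4150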
| 360
| 0
|
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )
    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
a__ : Union[str, Any] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a__,  # the expected-encoding dict defined above (variable name kept from this dump)
            model_name="albert-base-v2",
            revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )
| 136
|
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    """Möbius function of `number`: 0 if not square-free, else (-1)**k for k prime factors."""
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
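# Examples (illustrative): mobius(24) == 0 (24 = 2**3 * 3 is not square-free),
# mobius(15) == 1 (an even count of distinct primes), mobius(30) == -1 (an odd count).
#
#     assert (mobius(24), mobius(15), mobius(30)) == (0, 1, -1)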
| 136
| 1
|
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """
    Descriptor that mimics @property but caches output in a member variable.
    """

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached


def strtobool(val):
    """
    Convert a string representation of truth to 1 (true) or 0 (false).
    Raises ValueError if `val` is anything else.
    """
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
def is_tensor(x):
    """
    Tests if `x` is a `torch.Tensor`, `tf.Tensor`, jax array or `np.ndarray`.
    """
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """
    Tests if `x` is a numpy array or not.
    """
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """
    Tests if `x` is a torch tensor or not. Safe to call even if torch is not installed.
    """
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    """
    Tests if `x` is a torch device or not. Safe to call even if torch is not installed.
    """
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    """
    Tests if `x` is a torch dtype or not. Safe to call even if torch is not installed.
    """
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """
    Tests if `x` is a tensorflow tensor or not. Safe to call even if tensorflow is not installed.
    """
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    """
    Tests if `x` is a tensorflow symbolic tensor (i.e. not eager). Safe to call even if
    tensorflow is not installed.
    """
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """
    Tests if `x` is a Jax tensor or not. Safe to call even if jax is not installed.
    """
    return False if not is_flax_available() else _is_jax(x)


def to_py_obj(obj):
    """
    Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list.
    """
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """
    Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array.
    """
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Supports indexing by integer or slice
    (like a tuple) or by string (like a dictionary); `None` attributes are ignored.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value).")
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self) -> Tuple[Any]:
        """
        Convert self to a tuple containing all the attributes/keys that are not `None`.
        """
        return tuple(self[k] for k in self.keys())


class ExplicitEnum(str, Enum):
    """
    Enum with a more explicit error message for missing values.
    """

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}")


class PaddingStrategy(ExplicitEnum):
    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"


class ContextManagers:
    """
    Wrapper around `contextlib.ExitStack` that enters a collection of context managers.
    """

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def can_return_loss(model_class):
    """
    Check if a given model can return loss.
    """
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """
    Find the labels used by a given model.
    """
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]


def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single-level dict."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))


@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir


def transpose(array, axes=None):
    """
    Framework-agnostic version of `numpy.transpose` for numpy, torch, tensorflow and jax tensors.
    """
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """
    Framework-agnostic version of `numpy.reshape`.
    """
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """
    Framework-agnostic version of `numpy.squeeze`.
    """
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """
    Framework-agnostic version of `numpy.expand_dims`.
    """
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """
    Framework-agnostic version of `numpy.size`.
    """
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")


def add_model_info_to_auto_map(auto_map, repo_id):
    """
    Adds the information of the repo_id to a given auto map.
    """
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    """
    Infers the framework of a given model class by walking its MRO, because we cannot
    rely on isinstance() when the relevant base classes may not be importable.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
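# Minimal usage sketch for a few of the framework-agnostic helpers above (illustrative):
#
#     assert flatten_dict({"a": {"b": 1}, "c": 2}) == {"a.b": 1, "c": 2}
#     assert transpose(np.ones((2, 3))).shape == (3, 2)
#     assert to_py_obj(np.array([1, 2])) == [1, 2]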
| 119
|
'''simple docstring'''
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: float, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # Manhattan distance to the goal
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes = []

        self.reached = False

    def search(self):
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent,
                ))
        return successors

    def retrace_path(self, node):
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print("------")
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
for elem in grid:
print(elem)
| 119
| 1
|
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
_lowerCamelCase = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    """Load pytorch checkpoints in a flax model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions.")
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
) -> (Tuple[str], np.ndarray):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary"""

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        """Checks if `key` or `(model_prefix,) + key` is in random_flax_state_dict"""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    # convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix)

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.")

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue
            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]

            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue

                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load Flax checkpoints in a PyTorch model."""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load Flax weights in a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(".")
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components)
                special_pt_names[key_to_check] = key

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )
    return pt_model
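
A minimal usage sketch of the utilities above; the checkpoint name is illustrative and assumes `transformers` is installed with both the PyTorch and Flax backends:

from transformers import BertModel, FlaxBertModel

# Load a PyTorch checkpoint directly into a Flax model (uses these utilities internally).
flax_model = FlaxBertModel.from_pretrained("bert-base-uncased", from_pt=True)

# Or convert a raw PyTorch state dict by hand.
pt_model = BertModel.from_pretrained("bert-base-uncased")
flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)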
# Dummy placeholder objects for the "speech" backend (class names reconstructed).
from ..utils import DummyObject, requires_backends


class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
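
# Illustrative note (not part of the original file): `DummyObject` makes these
# placeholders fail loudly on use, so without the speech extra installed,
# `ASTFeatureExtractor()` raises an ImportError explaining which backend is missing.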
import os
import unittest

from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
    VOCAB_FILES_NAMES,
    BasicTokenizer,
    WordpieceTokenizer,
    _is_control,
    _is_punctuation,
    _is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english


@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
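
Outside the test harness, the tokenizer under test can be exercised directly; a minimal sketch using the checkpoint referenced in the tests above:

from transformers import MobileBertTokenizer

tokenizer = MobileBertTokenizer.from_pretrained("google/mobilebert-uncased")
print(tokenizer.tokenize("UNwant\u00E9d,running"))  # expected: ['un', '##want', '##ed', ',', 'runn', '##ing']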
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
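
The `_LazyModule` pattern above keeps importing the package cheap: submodules are only loaded when one of their attributes is first accessed. A hedged sketch of the effect (module path as in transformers):

from transformers.models import dpt  # torch-backed modeling code is not imported yet
model_cls = dpt.DPTForDepthEstimation  # first attribute access triggers the real import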
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
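
A minimal sketch of composing the configuration classes defined above (the override values are illustrative):

text_config = Pix2StructTextConfig(vocab_size=50244)
vision_config = Pix2StructVisionConfig(hidden_size=768)
config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
assert config.text_config.vocab_size == 50244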
"""simple docstring"""
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowerCamelCase__ ( _a , _a ):
a : int = 1
@register_to_config
def __init__( self : List[Any] , A_ : int = 1_0_0_0 , A_ : Optional[Union[np.ndarray, List[float]]] = None ):
'''simple docstring'''
self.set_timesteps(A_ )
# standard deviation of the initial noise distribution
__lowercase = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
__lowercase = 4
# running values
__lowercase = []
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , A_ : int , A_ : Union[str, torch.device] = None ):
'''simple docstring'''
__lowercase = num_inference_steps
__lowercase = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
__lowercase = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
__lowercase = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
__lowercase = torch.sin(steps * math.pi / 2 ) ** 2
__lowercase = (1.0 - self.betas**2) ** 0.5
__lowercase = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
__lowercase = timesteps.to(A_ )
__lowercase = []
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , A_ : torch.FloatTensor , A_ : int , A_ : torch.FloatTensor , A_ : bool = True , ):
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
__lowercase = (self.timesteps == timestep).nonzero().item()
__lowercase = timestep_index + 1
__lowercase = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(A_ )
if len(self.ets ) == 1:
__lowercase = self.ets[-1]
elif len(self.ets ) == 2:
__lowercase = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
__lowercase = (2_3 * self.ets[-1] - 1_6 * self.ets[-2] + 5 * self.ets[-3]) / 1_2
else:
__lowercase = (1 / 2_4) * (5_5 * self.ets[-1] - 5_9 * self.ets[-2] + 3_7 * self.ets[-3] - 9 * self.ets[-4])
__lowercase = self._get_prev_sample(A_ , A_ , A_ , A_ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=A_ )
def SCREAMING_SNAKE_CASE_ ( self : str , A_ : torch.FloatTensor , *A_ : List[str] , **A_ : Optional[int] ):
'''simple docstring'''
return sample
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , A_ : Optional[Any] , A_ : Any , A_ : List[Any] , A_ : Optional[Any] ):
'''simple docstring'''
__lowercase = self.alphas[timestep_index]
__lowercase = self.betas[timestep_index]
__lowercase = self.alphas[prev_timestep_index]
__lowercase = self.betas[prev_timestep_index]
__lowercase = (sample - sigma * ets) / max(A_ , 1e-8 )
__lowercase = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : List[Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
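
A minimal denoising-loop sketch for the scheduler above; the zero tensor stands in for a real denoising model, so the output is meaningless but the control flow matches what the scheduler expects:

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)
sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_output = torch.zeros_like(sample)  # stand-in for a UNet-style denoiser
    sample = scheduler.step(model_output, t, sample).prev_sample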
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def lowerCAmelCase_ ( UpperCamelCase__ : str ):
"""simple docstring"""
def decorator(UpperCamelCase__ : Tuple ):
__lowercase = getattr(UpperCamelCase__ , """handle_key""" , [] )
handle += [key]
setattr(UpperCamelCase__ , """handle_key""" , UpperCamelCase__ )
return func
return decorator
def lowerCAmelCase_ ( *UpperCamelCase__ : List[str] ):
"""simple docstring"""
def decorator(UpperCamelCase__ : Tuple ):
__lowercase = getattr(UpperCamelCase__ , """handle_key""" , [] )
handle += keys
setattr(UpperCamelCase__ , """handle_key""" , UpperCamelCase__ )
return func
return decorator
class lowerCamelCase__ ( _a ):
def __new__( cls : str , A_ : Optional[Any] , A_ : Union[str, Any] , A_ : int ):
'''simple docstring'''
__lowercase = super().__new__(cls , A_ , A_ , A_ )
if not hasattr(A_ , """key_handler""" ):
setattr(A_ , """key_handler""" , {} )
setattr(A_ , """handle_input""" , KeyHandler.handle_input )
for value in attrs.values():
__lowercase = getattr(A_ , """handle_key""" , [] )
for key in handled_keys:
__lowercase = value
return new_cls
@staticmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict ):
'''simple docstring'''
__lowercase = get_character()
if char != KEYMAP["undefined"]:
__lowercase = ord(A_ )
__lowercase = cls.key_handler.get(A_ )
if handler:
__lowercase = char
return handler(cls )
else:
return None
def lowerCAmelCase_ ( cls : int ):
"""simple docstring"""
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
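
A hedged sketch of how the decorator and metaclass above fit together; it assumes "up" is a key code defined in KEYMAP:

@register
class DemoMenu:
    @mark(KEYMAP["up"])
    def move_up(cls):
        return "up"

# handle_input, attached by the metaclass, reads one character and dispatches to
# move_up when the up-arrow code is received.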