| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
| 18
|
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> float:
UpperCAmelCase__ : int = np.array([[1, item, train_mtch[i]] for i, item in enumerate(lowerCAmelCase )] )
UpperCAmelCase__ : Any = np.array(lowerCAmelCase )
UpperCAmelCase__ : List[Any] = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , lowerCAmelCase ) ) , x.transpose() ) , lowerCAmelCase )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> float:
UpperCAmelCase__ : Union[str, Any] = (1, 2, 1)
UpperCAmelCase__ : Tuple = (1, 1, 0, 7)
UpperCAmelCase__ : int = SARIMAX(
lowerCAmelCase , exog=lowerCAmelCase , order=lowerCAmelCase , seasonal_order=lowerCAmelCase )
UpperCAmelCase__ : Any = model.fit(disp=lowerCAmelCase , maxiter=6_00 , method="""nm""" )
UpperCAmelCase__ : Optional[Any] = model_fit.predict(1 , len(lowerCAmelCase ) , exog=[test_match] )
return result[0]
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> float:
UpperCAmelCase__ : Union[str, Any] = SVR(kernel="""rbf""" , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : int = regressor.predict(lowerCAmelCase )
return y_pred[0]
def a__ ( lowerCAmelCase ) -> float:
train_user.sort()
UpperCAmelCase__ : Optional[Any] = np.percentile(lowerCAmelCase , 25 )
UpperCAmelCase__ : str = np.percentile(lowerCAmelCase , 75 )
UpperCAmelCase__ : int = qa - qa
UpperCAmelCase__ : Union[str, Any] = qa - (iqr * 0.1)
return low_lim
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> bool:
UpperCAmelCase__ : Dict = 0
UpperCAmelCase__ : str = 0
for i in list_vote:
if i > actual_result:
UpperCAmelCase__ : Tuple = not_safe + 1
else:
if abs(abs(lowerCAmelCase ) - abs(lowerCAmelCase ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
_A = [[1_82_31, 0.0, 1], [2_26_21, 1.0, 2], [1_56_75, 0.0, 3], [2_35_83, 1.0, 4]]
_A = pd.DataFrame(
data_input, columns=["""total_user""", """total_even""", """days"""]
)
_A = Normalizer().fit_transform(data_input_df.values)
# split data
_A = normalize_df[:, 2].tolist()
_A = normalize_df[:, 0].tolist()
_A = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
_A = normalize_df[:, [1, 2]].tolist()
_A = x[: len(x) - 1]
_A = x[len(x) - 1 :]
# for linear regression & sarimax
_A = total_date[: len(total_date) - 1]
_A = total_user[: len(total_user) - 1]
_A = total_match[: len(total_match) - 1]
_A = total_date[len(total_date) - 1 :]
_A = total_user[len(total_user) - 1 :]
_A = total_match[len(total_match) - 1 :]
# voting system with forecasting
_A = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
_A = """""" if data_safety_checker(res_vote, tst_user) else """not """
print("""Today's data is {not_str}safe.""")
| 171
| 0
|
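A quick sanity check for the forecasting helpers in the row above, reusing the toy values from the upstream doctest (an aside, not part of the dataset row):

n = linear_regression_prediction([2, 3, 4, 5], [5, 3, 4, 6], [3, 1, 2, 4], [2, 1], [2, 2])
assert abs(n - 5.0) < 1e-6  # checking precision because of floating point errors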
"""Tokenization classes for GPTNeoXJapanese."""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and emoji file into a dictionary."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    """
    Tokenizer based on a Japanese special sub-word encoding
    (https://github.com/tanreinama/Japanese-BPEEncoder_V2).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    """
    Sub-word tokenizer for Japanese, ported from https://github.com/tanreinama/Japanese-BPEEncoder_V2
    """

    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
| 164
|
"""Feature extractor class for SpeechT5."""
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 16000,
        padding_value: float = 0.0,
        do_normalize: bool = False,
        num_mel_bins: int = 80,
        hop_length: int = 16,
        win_length: int = 64,
        win_function: str = "hann_window",
        frame_signal_scale: float = 1.0,
        fmin: float = 80,
        fmax: float = 7600,
        mel_floor: float = 1e-10,
        reduction_factor: int = 2,
        return_attention_mask: bool = True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values
    def _extract_mel_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """Extracts log-mel filterbank features for one waveform array (unbatched)."""
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T
    def __call__(
        self,
        audio=None,
        audio_target=None,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio,
                False,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target,
                True,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def _process_audio(
        self,
        speech,
        is_target=False,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        **kwargs,
    ) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(s, dtype=np.float32) for s in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
| 164
| 1
|
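The `zero_mean_unit_var_norm` helper in the feature extractor above standardises each waveform over its unpadded length only; a minimal NumPy sketch of that computation (toy values and variable names are hypothetical):

import numpy as np

vector = np.array([1.0, 2.0, 3.0, 0.0, 0.0])  # last two entries are padding
length = 3  # what attention_mask.sum(-1) would report for this row
normed = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
normed[length:] = 0.0  # reset the padded tail to padding_value
assert abs(normed[:length].mean()) < 1e-6 and abs(normed[:length].std() - 1.0) < 1e-3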
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
| 314
|
def perfect_cube(n: int) -> bool:
    """Check whether a number is a perfect cube."""
    val = n ** (1 / 3)
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
| 63
| 0
|
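One caveat on `perfect_cube` above: `n ** (1 / 3)` goes through floating point, so cubing the raw root can miss exact cubes (`27 ** (1 / 3)` evaluates to 3.0000000000000004). A rounding-based variant is more robust; this sketch is an aside, not part of the original file:

def perfect_cube_rounded(n: int) -> bool:
    # round the float cube root to the nearest integer, then cube it back exactly
    val = round(abs(n) ** (1 / 3))
    return val * val * val == abs(n)


assert perfect_cube_rounded(27) and not perfect_cube_rounded(4)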
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to the image processor,
        assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 354
|
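`get_expected_values` in the test above mirrors DETR-style shortest-edge resizing; the same arithmetic in isolation (the helper name is hypothetical):

def shortest_edge_resize(h: int, w: int, shortest_edge: int = 18) -> tuple:
    # scale so the shorter side lands on `shortest_edge`, preserving aspect ratio
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge


assert shortest_edge_resize(30, 400) == (18, 240)  # a 30-high, 400-wide image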
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
| 89
| 0
|
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPT2Config,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
T5Config,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPT2LMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFT5ForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWav2Vec2Model,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
Wav2Vec2Config,
Wav2Vec2Model,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tf2_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
    import numpy as np
    import torch

    from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPT2LMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
T5ForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
'''bart''': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''bert''': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-base-cased-finetuned-mrpc''': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''dpr''': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''gpt2''': (
GPT2Config,
TFGPT2LMHeadModel,
GPT2LMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlnet''': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm''': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm-roberta''': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''transfo-xl''': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''openai-gpt''': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''roberta''': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''layoutlm''': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''roberta-large-mnli''': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''camembert''': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''flaubert''': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert''': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert-base-distilled-squad''': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert''': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert-visual-feature-encoder''': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''ctrl''': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''albert''': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''t5''': (
T5Config,
TFT5ForConditionalGeneration,
T5ForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''electra''': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''wav2vec2''': (
Wav2Vec2Config,
TFWav2Vec2Model,
Wav2Vec2Model,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        help=(
            f"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and "
            "convert all the models from AWS."
        ),
    )
    parser.add_argument(
        "--pytorch_checkpoint_path",
        default=None,
        type=str,
        help=(
            "Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
            "If not given, will download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        help=(
            "The config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture. If not given and "
            "--pytorch_checkpoint_path is not given or is a shortcut name "
            "use the configuration associated to the shortcut name on the AWS"
        ),
    )
    parser.add_argument(
        "--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
    )
    parser.add_argument(
        "--use_cached_models",
        action="store_true",
        help="Use cached models if possible instead of updating to latest checkpoint versions.",
    )
    parser.add_argument(
        "--remove_cached_files",
        action="store_true",
        help="Remove pytorch models after conversion (save memory when converting in batches).",
    )
    parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
    args = parser.parse_args()

    # if args.pytorch_checkpoint_path is not None:
    #     convert_pt_checkpoint_to_tf(args.model_type.lower(),
    #                                 args.pytorch_checkpoint_path,
    #                                 args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
    #                                 args.tf_dump_path,
    #                                 compare_with_pt_model=args.compare_with_pt_model,
    #                                 use_cached_models=args.use_cached_models)
    # else:
    convert_all_pt_checkpoints_to_tf(
        args.model_type.lower() if args.model_type is not None else None,
        args.tf_dump_path,
        model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
        if args.pytorch_checkpoint_path is not None
        else None,
        config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
        compare_with_pt_model=args.compare_with_pt_model,
        use_cached_models=args.use_cached_models,
        remove_cached_files=args.remove_cached_files,
        only_convert_finetuned_models=args.only_convert_finetuned_models,
    )
| 135
|
"""simple docstring"""
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$")
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
| 135
| 1
|
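A few doctest-style checks of `is_sri_lankan_phone_number` above, derived directly from the compiled pattern (valid prefixes are 0, 94, +94 or 0094, followed by 7x and seven more digits):

assert is_sri_lankan_phone_number("0094702343221")
assert is_sri_lankan_phone_number("+94767283261")
assert not is_sri_lankan_phone_number("0912343221")  # the digit after the prefix must be 7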
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds( train_file: str , eval_file: str , test_file: str , tokenizer: PreTrainedTokenizer , label_column_id: int , max_seq_length: Optional[int] = None , ):
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset("csv" , data_files=files )
    features_name = list(ds[list(files.keys() )[0]].features.keys() )
    label_name = features_name.pop(label_column_id )
    label_list = list(set(ds[list(files.keys() )[0]][label_name] ) )
    label2id = {label: i for i, label in enumerate(label_list )}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name ) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]] , truncation=True , max_length=max_seq_length , padding="max_length" ) , batched=True , )
    elif len(features_name ) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]) , truncation=True , max_length=max_seq_length , padding="max_length" , ) , batched=True , )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
    return train_ds, val_ds, test_ds, label2id
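# Hedged sketch (kept as a comment; the file name and checkpoint are
# placeholders): how get_tfds is typically fed from a CSV whose first column
# holds the label.
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#   train_ds, val_ds, test_ds, label2id = get_tfds(
#       train_file="train.csv", eval_file=None, test_file=None,
#       tokenizer=tokenizer, label_column_id=0, max_seq_length=128,
#   )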
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"} )
    train_file: str = field(default=None, metadata={"help": "The path of the training file"} )
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"} )
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"} )
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
def main() -> None:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
    logger.info(
        F"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
        F"""16-bits training: {training_args.fp16}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=tokenizer , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(label2id ) , label2id=label2id , id2label={id: label for label, id in label2id.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": (preds == p.label_ids).mean()}
    # Initialize our Trainer
    trainer = TFTrainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , "eval_results.txt" )
        with open(output_eval_file , "w" ) as writer:
            logger.info("***** Eval results *****" )
            for key, value in result.items():
                logger.info(F"""  {key} = {value}""" )
                writer.write(F"""{key} = {value}\n""" )
            results.update(result )
return results
if __name__ == "__main__":
main()
| 362
|
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizer( BertTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer( BertTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
CUSTOM_DPR_READER_DOCSTRING = r'\n    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n    with the format:\n\n    ```\n    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n    ```\n\n    Args:\n        questions (`str` or `List[str]`):\n            The questions to be encoded. You can specify one question for many passages. In this case, the question\n            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n            `titles` or `texts`.\n        titles (`str` or `List[str]`):\n            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n        texts (`str` or `List[str]`):\n            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n            Activates and controls padding. Accepts the following values:\n\n            - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n              if provided).\n            - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided.\n            - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n              lengths).\n        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n            Activates and controls truncation. Accepts the following values:\n\n            - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n              the maximum acceptable input length for the model if that argument is not provided. This will truncate\n              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n              of pairs) is provided.\n            - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the first\n              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the\n              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n              greater than the model maximum admissible input size).\n        max_length (`int`, *optional*):\n            Controls the maximum length to use by one of the truncation/padding parameters.\n\n            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n            is required by one of the truncation/padding parameters. If the model has no specific maximum input\n            length (like XLNet) truncation/padding to a maximum length will be deactivated.\n        return_tensors (`str` or [`~utils.TensorType`], *optional*):\n            If set, will return tensors instead of list of python integers. Acceptable values are:\n\n            - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n            - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n            - `\'np\'`: Return Numpy `np.ndarray` objects.\n        return_attention_mask (`bool`, *optional*):\n            Whether or not to return the attention mask. If not set, will return the attention mask according to the\n            specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n    Returns:\n        `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n        - `input_ids`: List of token ids to be fed to a model.\n        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n    '
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin:
    def __call__( self , questions , titles: Optional[str] = None , texts: Optional[str] = None , padding: Union[bool, str] = False , truncation: Union[bool, str] = False , max_length: Optional[int] = None , return_tensors: Optional[Union[str, TensorType]] = None , return_attention_mask: Optional[bool] = None , **kwargs , ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str ) else [titles]
        texts = texts if not isinstance(texts , str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions , str ) else [questions] * n_passages
        if len(titles ) != len(texts ):
            raise ValueError(
                F"""There should be as many titles than texts but got {len(titles )} titles and {len(texts )} texts.""" )
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )["input_ids"]
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )
    def decode_best_spans( self , reader_input: BatchEncoding , reader_output: DPRReaderOutput , num_spans: int = 16 , max_answer_length: int = 64 , num_spans_per_passage: int = 4 , ):
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits )
        sorted_docs = sorted(range(n_passages ) , reverse=True , key=relevance_logits.__getitem__ )
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
            if len(nbest_spans_predictions ) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self , start_logits: List[int] , end_logits: List[int] , max_answer_length: int , top_spans: int , ):
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores , key=lambda x: x[1] , reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" )
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" )
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
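# Hedged usage sketch (comment only; the tensors are illustrative) for the
# reader tokenizer defined below, which mixes in the methods above:
#
#   tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(questions=["What is love?"], titles=["Haddaway"],
#                       texts=["'What Is Love' is a song recorded by Haddaway"])
#   # reader_output = model(**encoded)  # a DPRReader model supplies the logits
#   # best = tokenizer.decode_best_spans(encoded, reader_output)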
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class DPRReaderTokenizer( CustomDPRReaderTokenizerMixin, BertTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
| 209
| 0
|
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation( graph, v, visited_forward, visited_backward, cst_fwd, cst_bwd, queue, parent, shortest_distance, ):
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf )
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt) )
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij( source, destination, graph_forward, graph_backward ):
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source) )
    queue_backward.put((0, destination) )
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd )
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd )
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward, cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance, )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward, cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance, )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'B': [['C', 1]],
'C': [['D', 1]],
'D': [['F', 1]],
'E': [['B', 1], ['G', 2]],
'F': [],
'G': [['F', 1]],
}
graph_bwd = {
'B': [['E', 1]],
'C': [['B', 1]],
'D': [['C', 1]],
'F': [['D', 1], ['G', 1]],
'E': [[None, np.inf]],
'G': [['E', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
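    # Hedged example (added for illustration) on the module's own graphs:
    # the shortest E -> F route is E -> G -> F with total weight 3.
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # expected: 3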
| 88
|
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_( state_dict ):
    ignore_keys = [
        """decoder.version""",
        """decoder.output_projection.weight""",
        """_float_tensor""",
        """decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None )
def make_linear_from_emb( emb ):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_xglm_checkpoint_from_disk( checkpoint_path ):
    checkpoint = torch.load(checkpoint_path, map_location="""cpu""" )
    args = Namespace(**checkpoint["""cfg"""]["""model"""] )
    state_dict = checkpoint["""model"""]
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict["""decoder.embed_tokens.weight"""].shape[0]
    state_dict = {key.replace("""decoder""", """model""" ): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="""gelu""", scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed, )
    model = XGLMForCausalLM(config )
    missing = model.load_state_dict(state_dict, strict=False )
    print(missing )
    model.lm_head = make_linear_from_emb(model.model.embed_tokens )
    return model
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
args = parser.parse_args()
model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
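# Hedged usage (the script filename and paths below are placeholders; the
# original file does not pin them):
#
#   python convert_xglm_original_ckpt_to_trfms.py /path/to/model.pt ./xglm-converted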
| 88
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImgaImgPipelineFastTests( PipelineTesterMixin, unittest.TestCase ):
    pipeline_class = ShapEImgaImgPipeline
    params = ['image']
    batch_params = ['image']
    required_optional_params = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
A_ = False
@property
def UpperCAmelCase__ ( self : Dict )->Optional[Any]:
'''simple docstring'''
return 32
@property
def UpperCAmelCase__ ( self : Any )->Optional[Any]:
'''simple docstring'''
return 32
@property
def UpperCAmelCase__ ( self : List[Any] )->Any:
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCAmelCase__ ( self : int )->int:
'''simple docstring'''
return 8
@property
def UpperCAmelCase__ ( self : Optional[Any] )->Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase : int = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__lowerCAmelCase : int = CLIPVisionModel(_snake_case )
return model
@property
def UpperCAmelCase__ ( self : Optional[Any] )->Dict:
'''simple docstring'''
__lowerCAmelCase : List[str] = CLIPImageProcessor(
crop_size=224 , do_center_crop=_snake_case , do_normalize=_snake_case , do_resize=_snake_case , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=224 , )
return image_processor
@property
def UpperCAmelCase__ ( self : Dict )->Tuple:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase : str = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""embedding_proj_norm_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
__lowerCAmelCase : List[str] = PriorTransformer(**_snake_case )
return model
@property
def UpperCAmelCase__ ( self : str )->Dict:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase : Any = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
__lowerCAmelCase : Any = ShapERenderer(**_snake_case )
return model
def UpperCAmelCase__ ( self : Any )->List[Any]:
'''simple docstring'''
__lowerCAmelCase : Dict = self.dummy_prior
__lowerCAmelCase : str = self.dummy_image_encoder
__lowerCAmelCase : Tuple = self.dummy_image_processor
__lowerCAmelCase : Union[str, Any] = self.dummy_renderer
__lowerCAmelCase : str = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=_snake_case , clip_sample=_snake_case , clip_sample_range=1.0 , )
__lowerCAmelCase : str = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""image_processor""": image_processor,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def UpperCAmelCase__ ( self : Optional[Any] , _snake_case : Tuple , _snake_case : Dict=0 )->str:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_snake_case ) ).to(_snake_case )
if str(_snake_case ).startswith("""mps""" ):
__lowerCAmelCase : Optional[Any] = torch.manual_seed(_snake_case )
else:
__lowerCAmelCase : Dict = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
__lowerCAmelCase : Tuple = {
"""image""": input_image,
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def UpperCAmelCase__ ( self : Any )->Optional[Any]:
'''simple docstring'''
__lowerCAmelCase : Dict = """cpu"""
__lowerCAmelCase : Dict = self.get_dummy_components()
__lowerCAmelCase : Optional[int] = self.pipeline_class(**_snake_case )
__lowerCAmelCase : List[str] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
__lowerCAmelCase : Dict = pipe(**self.get_dummy_inputs(_snake_case ) )
__lowerCAmelCase : List[Any] = output.images[0]
__lowerCAmelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__lowerCAmelCase : Union[str, Any] = np.array(
[
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ ( self : str )->Optional[Any]:
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCAmelCase__ ( self : Dict )->List[Any]:
'''simple docstring'''
__lowerCAmelCase : int = torch_device == """cpu"""
__lowerCAmelCase : List[Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_snake_case , relax_max_difference=_snake_case , )
def UpperCAmelCase__ ( self : str )->List[Any]:
'''simple docstring'''
__lowerCAmelCase : int = self.get_dummy_components()
__lowerCAmelCase : Dict = self.pipeline_class(**_snake_case )
__lowerCAmelCase : int = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
__lowerCAmelCase : List[Any] = 1
__lowerCAmelCase : Tuple = 2
__lowerCAmelCase : Any = self.get_dummy_inputs(_snake_case )
for key in inputs.keys():
if key in self.batch_params:
__lowerCAmelCase : Dict = batch_size * [inputs[key]]
__lowerCAmelCase : Optional[int] = pipe(**_snake_case , num_images_per_prompt=_snake_case )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : str )->Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : List[Any] )->Optional[Any]:
'''simple docstring'''
__lowerCAmelCase : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
__lowerCAmelCase : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_img2img_out.npy""" )
__lowerCAmelCase : List[Any] = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""" )
__lowerCAmelCase : List[Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
__lowerCAmelCase : Optional[Any] = torch.Generator(device=_snake_case ).manual_seed(0 )
__lowerCAmelCase : List[str] = pipe(
_snake_case , generator=_snake_case , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_snake_case , _snake_case )
| 359
|
def rank_of_matrix( matrix: list[list[int | float]] ) -> int:
    rows = len(matrix )
    columns = len(matrix[0] )
    rank = min(rows , columns )
    for row in range(rank ):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1 , rows ):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row , columns ):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1 , rows ):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows ):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
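    # Hedged examples (added for illustration): a full-rank 2x2 matrix, and a
    # rank-deficient one whose second row is a multiple of the first.
    print(rank_of_matrix([[1.0, 2.0], [3.0, 4.0]]))  # expected: 2
    print(rank_of_matrix([[1.0, 2.0], [2.0, 4.0]]))  # expected: 1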
| 232
| 0
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits( x , bits=BITS ):
    # convert a 0..1 image tensor to a ±1 bit representation
    device = x.device
    x = (x * 255).int().clamp(0 , 255 )
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device )
    mask = rearrange(mask , """d -> d 1 1""" )
    x = rearrange(x , """b c h w -> b c 1 h w""" )
    bits = ((x & mask) != 0).float()
    bits = rearrange(bits , """b c d h w -> b (c d) h w""" )
    bits = bits * 2 - 1
    return bits
def bits_to_decimal( x , bits=BITS ):
    # convert a bit representation back to a 0..1 image tensor
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device , dtype=torch.int32 )
    mask = rearrange(mask , """d -> d 1 1""" )
    x = rearrange(x , """b (c d) h w -> b c d h w""" , d=8 )
    dec = reduce(x * mask , """b c d h w -> b c h w""" , """sum""" )
    return (dec / 255).clamp(0.0 , 1.0 )
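# Hedged round-trip sketch (comment only; shapes are illustrative): encoding an
# image tensor to bits and decoding back reproduces the 8-bit quantized values.
#
#   x = torch.rand(1, 3, 8, 8)
#   assert torch.allclose(bits_to_decimal(decimal_to_bits(x)), (x * 255).int() / 255, atol=1e-6)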
def ddim_bit_scheduler_step( self , model_output , timestep , sample , eta = 0.0 , use_clipped_model_output = True , generator=None , return_dict = True , ):
    if self.num_inference_steps is None:
        raise ValueError(
            """Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"
    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t
    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep , prev_timestep )
    std_dev_t = eta * variance ** 0.5
    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output ) else """cpu"""
        noise = torch.randn(model_output.shape , dtype=model_output.dtype , generator=generator ).to(device )
        variance = self._get_variance(timestep , prev_timestep ) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance
    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )
def _A ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__="epsilon" , lowercase__=None , lowercase__ = True , ):
lowercase__ = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
lowercase__ , lowercase__ = torch.split(lowercase__ , sample.shape[1] , dim=1 )
else:
lowercase__ = None
# 1. compute alphas, betas
lowercase__ = self.alphas_cumprod[t]
lowercase__ = self.alphas_cumprod[t - 1] if t > 0 else self.one
lowercase__ = 1 - alpha_prod_t
lowercase__ = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
lowercase__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
lowercase__ = model_output
else:
raise ValueError(f'''Unsupported prediction_type {prediction_type}.''' )
# 3. Clip "predicted x_0"
lowercase__ = self.bit_scale
if self.config.clip_sample:
lowercase__ = torch.clamp(lowercase__ , -scale , lowercase__ )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
lowercase__ = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
lowercase__ = 0
if t > 0:
lowercase__ = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=lowercase__ ).to(model_output.device )
lowercase__ = (self._get_variance(lowercase__ , predicted_variance=lowercase__ ) ** 0.5) * noise
lowercase__ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=lowercase__ , pred_original_sample=lowercase__ )
class BitDiffusion( DiffusionPipeline ):
    def __init__( self , unet: UNet2DConditionModel , scheduler: Union[DDIMScheduler, DDPMScheduler] , bit_scale: Optional[float] = 1.0 , ) -> None:
        super().__init__()
        self.bit_scale = bit_scale
        # route scheduler steps through the bit-aware variants defined above
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler , DDIMScheduler ) else ddpm_bit_scheduler_step
        )
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , height = 256 , width = 256 , num_inference_steps = 50 , generator = None , batch_size = 1 , output_type = "pil" , return_dict = True , **kwargs , ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width) , generator=generator , )
        latents = decimal_to_bits(latents ) * self.bit_scale
        latents = latents.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # predict the noise residual
            noise_pred = self.unet(latents , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents ).prev_sample
        image = bits_to_decimal(latents )
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
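# Hedged usage sketch (comment only; the UNet construction details are
# assumptions, the original file does not pin a checkpoint):
#
#   unet = UNet2DConditionModel(...)  # a UNet operating on 3 * BITS input channels
#   scheduler = DDIMScheduler()
#   pipe = BitDiffusion(unet, scheduler, bit_scale=1.0)
#   image = pipe(height=64, width=64, num_inference_steps=10).images[0]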
| 164
|
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
__A = "sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro( TestCasePlus ):
def A__ ( self ) -> List[Any]:
'''simple docstring'''
super().setUp()
lowercase__ = cached_path(
"""https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz""" , extract_compressed_file=lowerCamelCase__ , )
lowercase__ = F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def A__ ( self ) -> str:
'''simple docstring'''
MarianMTModel.from_pretrained(lowerCamelCase__ )
@slow
@require_torch_gpu
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__ = {
"""$MAX_LEN""": 64,
"""$BS""": 64,
"""$GAS""": 1,
"""$ENRO_DIR""": self.data_dir,
"""facebook/mbart-large-cc25""": MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
"""--learning_rate=3e-5""": """--learning_rate 3e-4""",
"""--num_train_epochs 6""": """--num_train_epochs 1""",
}
# Clean up bash script
lowercase__ = (self.test_file_dir / """train_mbart_cc25_enro.sh""").open().read().split("""finetune.py""" )[1].strip()
lowercase__ = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
for k, v in env_vars_to_replace.items():
lowercase__ = bash_script.replace(lowerCamelCase__ , str(lowerCamelCase__ ) )
lowercase__ = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
lowercase__ = F'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
lowercase__ = ["""finetune.py"""] + bash_script.split() + args
with patch.object(lowerCamelCase__ , """argv""" , lowerCamelCase__ ):
lowercase__ = argparse.ArgumentParser()
lowercase__ = pl.Trainer.add_argparse_args(lowerCamelCase__ )
lowercase__ = SummarizationModule.add_model_specific_args(lowerCamelCase__ , os.getcwd() )
lowercase__ = parser.parse_args()
lowercase__ = main(lowerCamelCase__ )
# Check metrics
lowercase__ = load_json(model.metrics_save_path )
lowercase__ = metrics["""val"""][0]
lowercase__ = metrics["""val"""][-1]
self.assertEqual(len(metrics["""val"""] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , lowerCamelCase__ )
self.assertGreater(last_step_stats["""val_avg_gen_time"""] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats["""val_avg_gen_time"""] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats["""val_avg_bleu"""] - first_step_stats["""val_avg_bleu"""] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats["""val_avg_bleu"""] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics["""val"""][-1]["""val_avg_bleu"""] - metrics["""test"""][-1]["""test_avg_bleu"""] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
lowercase__ = os.listdir(lowerCamelCase__ )
lowercase__ = [x for x in contents if x.endswith(""".ckpt""" )][0]
lowercase__ = os.path.join(args.output_dir , lowerCamelCase__ )
lowercase__ = torch.load(lowerCamelCase__ , map_location="""cpu""" )
lowercase__ = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
lowercase__ = {os.path.basename(lowerCamelCase__ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["""test"""] ) == 1
class TestDistilMarianNoTeacher( TestCasePlus ):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase__ = F'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
lowercase__ = {
"""--fp16_opt_level=O1""": """""",
"""$MAX_LEN""": 128,
"""$BS""": 16,
"""$GAS""": 1,
"""$ENRO_DIR""": data_dir,
"""$m""": """sshleifer/student_marian_en_ro_6_1""",
"""val_check_interval=0.25""": """val_check_interval=1.0""",
}
# Clean up bash script
lowercase__ = (
(self.test_file_dir / """distil_marian_no_teacher.sh""").open().read().split("""distillation.py""" )[1].strip()
)
lowercase__ = bash_script.replace("""\\\n""" , """""" ).strip().replace("""\"$@\"""" , """""" )
lowercase__ = bash_script.replace("""--fp16 """ , """ """ )
for k, v in env_vars_to_replace.items():
lowercase__ = bash_script.replace(lowerCamelCase__ , str(lowerCamelCase__ ) )
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = bash_script.replace("""--fp16""" , """""" )
lowercase__ = 6
lowercase__ = (
["""distillation.py"""]
+ bash_script.split()
+ [
F'''--output_dir={output_dir}''',
"""--gpus=1""",
"""--learning_rate=1e-3""",
F'''--num_train_epochs={epochs}''',
"""--warmup_steps=10""",
"""--val_check_interval=1.0""",
"""--do_predict""",
]
)
with patch.object(lowerCamelCase__ , """argv""" , lowerCamelCase__ ):
lowercase__ = argparse.ArgumentParser()
lowercase__ = pl.Trainer.add_argparse_args(lowerCamelCase__ )
lowercase__ = SummarizationDistiller.add_model_specific_args(lowerCamelCase__ , os.getcwd() )
lowercase__ = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
lowercase__ = distill_main(lowerCamelCase__ )
# Check metrics
lowercase__ = load_json(model.metrics_save_path )
lowercase__ = metrics["""val"""][0]
lowercase__ = metrics["""val"""][-1]
assert len(metrics["""val"""] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , lowerCamelCase__ )
# check lightning ckpt can be loaded and has a reasonable statedict
lowercase__ = os.listdir(lowerCamelCase__ )
lowercase__ = [x for x in contents if x.endswith(""".ckpt""" )][0]
lowercase__ = os.path.join(args.output_dir , lowerCamelCase__ )
lowercase__ = torch.load(lowerCamelCase__ , map_location="""cpu""" )
lowercase__ = """model.model.decoder.layers.0.encoder_attn_layer_norm.weight"""
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
lowercase__ = {os.path.basename(lowerCamelCase__ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["""test"""] ) == 1
| 164
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LlamaConfig( PretrainedConfig ):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__(self , vocab_size=32_000 , hidden_size=4096 , intermediate_size=11_008 , num_hidden_layers=32 , num_attention_heads=32 , num_key_value_heads=None , hidden_act="silu" , max_position_embeddings=2048 , initializer_range=0.02 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , pretraining_tp=1 , tie_word_embeddings=False , rope_scaling=None , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation(self ) -> None:
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                F"got {self.rope_scaling}" )
        rope_scaling_type = self.rope_scaling.get("type" , None )
        rope_scaling_factor = self.rope_scaling.get("factor" , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(F"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
| 351
|
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing( search_prob , find_max = True , max_x = math.inf , min_x = -math.inf , max_y = math.inf , min_y = -math.inf , visualization = False , start_temperate = 100 , rate_of_decrease = 0.01 , threshold_temp = 1 , ):
    """Hill-climbing variant that sometimes accepts worse neighbors, with the acceptance probability decaying as the temperature cools."""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score )
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0 , len(neighbors ) - 1 )  # picking a random neighbor
            picked_neighbor = neighbors.pop(index )
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations ) , scores )
        plt.xlabel("Iterations" )
        plt.ylabel("Function values" )
        plt.show()
    return best_state
if __name__ == "__main__":
    def test_f1(x , y ):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        '''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        f'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        '''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        f'''and 50 > y > - 5 found via simulated annealing: {local_max.score()}'''
    )

    def test_f2(x , y ):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        '''The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
        f'''{local_min.score()}'''
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        '''The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
        f'''{local_max.score()}'''
    )
| 244
| 0
|
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument("""--user""", type=str, default="""ubuntu""")
parser.add_argument("""--host""", type=str, default="""localhost""")
parser.add_argument("""--key_path""", type=str, default=None)
parser.add_argument("""--instance""", type=str, default="""V100:1""")
parser.add_argument("""--provider""", type=str, default="""cheapest""")
parser.add_argument("""--use_spot""", type=bool, default=False)
parser.add_argument("""--example""", type=str, default="""pytorch/text-generation/run_generation.py""")
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("""Cannot specify both BYO and on-demand cluster args""")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["""pip:./"""]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f"""pip install -r transformers/examples/{example_dir}/requirements.txt"""])
cluster.run(["""pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"""])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f"""python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"""])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs=3, lr=2e-5, seed=42, batch_size=16,
    #                  stream_logs=True)
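    # Hypothetical invocations of this launcher (script name, host, and flags are
    # illustrative, not taken from the original):
    #   BYO cluster:       python launch.py --user ubuntu --host 1.2.3.4 --key_path ~/.ssh/id_rsa \
    #                          --example pytorch/text-generation/run_generation.py --prompt "Hello"
    #   On-demand cluster: python launch.py --instance V100:1 --provider cheapest \
    #                          --example pytorch/text-generation/run_generation.py --prompt "Hello"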
| 68
|
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    # Removes segments. Positive values shave the first segments, negative shave the last segments.
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")

        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")

        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item

        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")

        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping
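# A quick illustration (assumed input key, added for clarity) of the mapping these
# helpers produce:
#   renew_resnet_paths(["input_blocks.1.0.in_layers.0.weight"])
#   -> [{"old": "input_blocks.1.0.in_layers.0.weight", "new": "input_blocks.1.0.norm1.weight"}]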
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }
    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )
    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
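    # Hypothetical invocation (script and file names are illustrative, not from the original):
    #   python convert_ldm_checkpoint.py --checkpoint_path model.ckpt \
    #       --config_file config.json --dump_path ./ldm-converted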
| 89
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
"tokenization_roc_bert": ["RoCBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
"ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoCBertForCausalLM",
"RoCBertForMaskedLM",
"RoCBertForMultipleChoice",
"RoCBertForPreTraining",
"RoCBertForQuestionAnswering",
"RoCBertForSequenceClassification",
"RoCBertForTokenClassification",
"RoCBertLayer",
"RoCBertModel",
"RoCBertPreTrainedModel",
"load_tf_weights_in_roc_bert",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
"tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
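# Illustration of the lazy pattern above (assumes torch is installed): the heavy
# submodules are imported only when one of their attributes is first accessed.
#   from transformers.models.deberta import DebertaModel  # triggers modeling_deberta import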
| 26
| 1
|
from math import ceil, sqrt
def solution(limit: int = 1000000) -> int:
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f"{solution() = }")
| 51
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": MegatronBertModel,
"""fill-mask""": MegatronBertForMaskedLM,
"""question-answering""": MegatronBertForQuestionAnswering,
"""text-classification""": MegatronBertForSequenceClassification,
"""text-generation""": MegatronBertForCausalLM,
"""token-classification""": MegatronBertForTokenClassification,
"""zero-shot""": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_megatron_bert_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_megatron_bert_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_megatron_bert_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_megatron_bert_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_megatron_bert_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_megatron_bert_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_megatron_bert_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
@slow
@unittest.skip('''Model is not available.''' )
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 209
| 0
|
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
@property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)
    def clean_text(self, text):
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))
    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep
    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
    def is_ch_char(self, char):
if "\u4e00" <= char <= "\u9fff":
return True
return False
    def is_alpha(self, char):
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
    def is_punct(self, char):
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False
    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
| 124
|
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)
def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
if donor_conc <= 0:
raise ValueError("Donor concentration should be positive" )
elif acceptor_conc <= 0:
raise ValueError("Acceptor concentration should be positive" )
elif intrinsic_conc <= 0:
raise ValueError("Intrinsic concentration should be positive" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"Donor concentration should be greater than intrinsic concentration" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"Acceptor concentration should be greater than intrinsic concentration" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
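# Illustrative call (values are assumed, in cm^-3): a symmetric silicon-like junction
# at 300 K with Nd = Na = 1e17 and ni = 1.5e10 gives roughly 0.81 V.
#   print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10))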
| 124
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
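# Minimal usage sketch (assumes the `transformers` package is installed; added for
# illustration, not part of the original module):
#   from transformers import RealmConfig
#   config = RealmConfig(num_candidates=4)
#   print(config.num_candidates)  # 4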
| 291
|
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
    scores = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
    pair = f"{src_lang}-{tgt_lang}"
    readme = f'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    _, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 232
| 0
|
import argparse
import hashlib
import io
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = ".") -> bytes:
    # NOTE: the default for `root` is an assumption; the call site below passes only the URL.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        # _download returns raw bytes, so deserialize them here
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path])), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
snake_case : str = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
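# Hedged usage sketch (script name, checkpoint alias and output folder below are
# illustrative; any key of the _MODELS mapping or a local .pt file should work for
# --checkpoint_path):
#   python convert_openai_to_hf.py --checkpoint_path tiny.en --pytorch_dump_folder_path ./whisper-tiny.en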
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipeline_utils import DiffusionPipeline` temporarily keeps working
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
)
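# The forward-compatible import, per the deprecation message above:
#   from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput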
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess ( image , w , h ):
    '''simple docstring'''
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    if isinstance(image[0] , PIL.Image.Image ):
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
def slerp ( t , v0 , v1 , DOT_THRESHOLD=0.9995 ):
    '''simple docstring'''
    inputs_are_torch = False
    if not isinstance(v0 , np.ndarray ):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0 ) * np.linalg.norm(v1 )) )
    if np.abs(dot ) > DOT_THRESHOLD:
        # vectors are nearly parallel: fall back to linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot )
        sin_theta_0 = np.sin(theta_0 )
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t )
        s0 = np.sin(theta_0 - theta_t ) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1
    if inputs_are_torch:
        v2 = torch.from_numpy(v2 ).to(input_device )
    return v2
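# Quick numeric check for slerp above (illustrative values): halfway between two
# orthogonal unit vectors the result stays on the unit sphere instead of shrinking
# like a plain average.
#   slerp(0.5 , np.array([1.0, 0.0] ) , np.array([0.0, 1.0] ) )  # ~ [0.7071, 0.7071]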
def spherical_dist_loss ( x , y ):
    '''simple docstring'''
    x = F.normalize(x , dim=-1 )
    y = F.normalize(y , dim=-1 )
    return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
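# For unit vectors separated by angle θ we have ‖x - y‖ = 2·sin(θ/2), so the
# expression above, 2·arcsin(‖x - y‖ / 2)², evaluates to θ²/2: a squared
# great-circle distance between the normalized embeddings.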
def set_requires_grad ( model , value ):
    '''simple docstring'''
    for param in model.parameters():
        param.requires_grad = value
class __A( DiffusionPipeline ):
"""simple docstring"""
def __init__(self , vae , text_encoder , clip_model , tokenizer , unet , scheduler , feature_extractor , coca_model=None , coca_tokenizer=None , coca_transform=None , ):
    super().__init__()
    self.register_modules(
        vae=vae , text_encoder=text_encoder , clip_model=clip_model , tokenizer=tokenizer , unet=unet , scheduler=scheduler , feature_extractor=feature_extractor , coca_model=coca_model , coca_tokenizer=coca_tokenizer , coca_transform=coca_transform , )
    self.feature_extractor_size = (
        feature_extractor.size
        if isinstance(feature_extractor.size , int )
        else feature_extractor.size["""shortest_edge"""]
    )
    self.normalize = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
    set_requires_grad(self.text_encoder , False )
    set_requires_grad(self.clip_model , False )
def enable_attention_slicing (self , slice_size = "auto" ):
    if slice_size == "auto":
        # half the attention head size is usually a good trade-off between
        # speed and memory
        slice_size = self.unet.config.attention_head_dim // 2
    self.unet.set_attention_slice(slice_size )
def disable_attention_slicing (self ):
    self.enable_attention_slicing(None )
def freeze_vae (self ):
    set_requires_grad(self.vae , False )
def unfreeze_vae (self ):
    set_requires_grad(self.vae , True )
def freeze_unet (self ):
    set_requires_grad(self.unet , False )
def unfreeze_unet (self ):
    set_requires_grad(self.unet , True )
def get_timesteps (self , num_inference_steps , strength , device ):
    # get the original timestep using init_timestep
    init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
    t_start = max(num_inference_steps - init_timestep , 0 )
    timesteps = self.scheduler.timesteps[t_start:]
    return timesteps, num_inference_steps - t_start
def prepare_latents (self , image , timestep , batch_size , dtype , device , generator=None ):
    if not isinstance(image , torch.Tensor ):
        raise ValueError(F"`image` has to be of type `torch.Tensor` but is {type(image )}" )
    image = image.to(device=device , dtype=dtype )
    if isinstance(generator , list ):
        init_latents = [
            self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(batch_size )
        ]
        init_latents = torch.cat(init_latents , dim=0 )
    else:
        init_latents = self.vae.encode(image ).latent_dist.sample(generator )
    # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
    init_latents = 0.1_8215 * init_latents
    init_latents = init_latents.repeat_interleave(batch_size , dim=0 )
    noise = randn_tensor(init_latents.shape , generator=generator , device=device , dtype=dtype )
    # get latents
    init_latents = self.scheduler.add_noise(init_latents , noise , timestep )
    latents = init_latents
    return latents
def get_image_description (self , image ):
    transformed_image = self.coca_transform(image ).unsqueeze(0 )
    with torch.no_grad(), torch.cuda.amp.autocast():
        generated = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
    generated = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
    return generated.split("""<end_of_text>""" )[0].replace("""<start_of_text>""" , """""" ).rstrip(""" .,""" )
def get_clip_image_embeddings (self , image , batch_size ):
    clip_image_input = self.feature_extractor.preprocess(image )
    clip_image_features = torch.from_numpy(clip_image_input["""pixel_values"""][0] ).unsqueeze(0 ).to(self.device ).half()
    image_embeddings_clip = self.clip_model.get_image_features(clip_image_features )
    image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=True )
    image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size , dim=0 )
    return image_embeddings_clip
@torch.enable_grad()
def cond_fn (self , latents , timestep , index , text_embeddings , noise_pred_original , original_image_embeddings_clip , clip_guidance_scale , ):
    latents = latents.detach().requires_grad_()
    latent_model_input = self.scheduler.scale_model_input(latents , timestep )
    # predict the noise residual
    noise_pred = self.unet(latent_model_input , timestep , encoder_hidden_states=text_embeddings ).sample
    if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
        alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
        beta_prod_t = 1 - alpha_prod_t
        # compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
        fac = torch.sqrt(alpha_prod_t )
        sample = pred_original_sample * (fac) + latents * (1 - fac)
    elif isinstance(self.scheduler , LMSDiscreteScheduler ):
        sigma = self.scheduler.sigmas[index]
        sample = latents - sigma * noise_pred
    else:
        raise ValueError(F"scheduler type {type(self.scheduler )} not supported" )
    # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
    sample = 1 / 0.1_8215 * sample
    image = self.vae.decode(sample ).sample
    image = (image / 2 + 0.5).clamp(0 , 1 )
    image = transforms.Resize(self.feature_extractor_size )(image )
    image = self.normalize(image ).to(latents.dtype )
    image_embeddings_clip = self.clip_model.get_image_features(image )
    image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=True )
    loss = spherical_dist_loss(image_embeddings_clip , original_image_embeddings_clip ).mean() * clip_guidance_scale
    grads = -torch.autograd.grad(loss , latents )[0]
    if isinstance(self.scheduler , LMSDiscreteScheduler ):
        latents = latents.detach() + grads * (sigma**2)
        noise_pred = noise_pred_original
    else:
        noise_pred = noise_pred_original - torch.sqrt(beta_prod_t ) * grads
    return noise_pred, latents
@torch.no_grad()
def __call__(self , content_image , style_image , content_prompt=None , style_prompt=None , height=512 , width=512 , noise_strength=0.6 , num_inference_steps=50 , guidance_scale=7.5 , batch_size=1 , eta=0.0 , clip_guidance_scale=100 , generator=None , output_type="pil" , return_dict=True , slerp_latent_style_strength=0.8 , slerp_prompt_style_strength=0.1 , slerp_clip_image_style_strength=0.1 , ):
    if isinstance(generator , list ) and len(generator ) != batch_size:
        raise ValueError(F"You have passed {batch_size} batch_size, but only {len(generator )} generators." )
    if height % 8 != 0 or width % 8 != 0:
        raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
    if isinstance(generator , torch.Generator ) and batch_size > 1:
        generator = [generator] + [None] * (batch_size - 1)
    coca_is_none = [
        ("""model""", self.coca_model is None),
        ("""tokenizer""", self.coca_tokenizer is None),
        ("""transform""", self.coca_transform is None),
    ]
    coca_is_none = [x[0] for x in coca_is_none if x[1]]
    coca_is_none_str = """, """.join(coca_is_none )
    # generate prompts with coca model if prompt is None
    if content_prompt is None:
        if len(coca_is_none ):
            raise ValueError(
                F"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                F"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." )
        content_prompt = self.get_image_description(content_image )
    if style_prompt is None:
        if len(coca_is_none ):
            raise ValueError(
                F"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                F" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." )
        style_prompt = self.get_image_description(style_image )
    # get prompt text embeddings for content and style
    content_text_input = self.tokenizer(
        content_prompt , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=True , return_tensors="""pt""" , )
    content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
    style_text_input = self.tokenizer(
        style_prompt , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=True , return_tensors="""pt""" , )
    style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
    text_embeddings = slerp(slerp_prompt_style_strength , content_text_embeddings , style_text_embeddings )
    # duplicate text embeddings for each generation per prompt
    text_embeddings = text_embeddings.repeat_interleave(batch_size , dim=0 )
    # set timesteps
    accepts_offset = """offset""" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
    extra_set_kwargs = {}
    if accepts_offset:
        extra_set_kwargs["""offset"""] = 1
    self.scheduler.set_timesteps(num_inference_steps , **extra_set_kwargs )
    # Some schedulers like PNDM have timesteps as arrays
    # It's more optimized to move all timesteps to correct device beforehand
    self.scheduler.timesteps.to(self.device )
    timesteps , num_inference_steps = self.get_timesteps(num_inference_steps , noise_strength , self.device )
    latent_timestep = timesteps[:1].repeat(batch_size )
    # Preprocess image
    preprocessed_content_image = preprocess(content_image , width , height )
    content_latents = self.prepare_latents(
        preprocessed_content_image , latent_timestep , batch_size , text_embeddings.dtype , self.device , generator )
    preprocessed_style_image = preprocess(style_image , width , height )
    style_latents = self.prepare_latents(
        preprocessed_style_image , latent_timestep , batch_size , text_embeddings.dtype , self.device , generator )
    latents = slerp(slerp_latent_style_strength , content_latents , style_latents )
    if clip_guidance_scale > 0:
        content_clip_image_embedding = self.get_clip_image_embeddings(content_image , batch_size )
        style_clip_image_embedding = self.get_clip_image_embeddings(style_image , batch_size )
        clip_image_embeddings = slerp(
            slerp_clip_image_style_strength , content_clip_image_embedding , style_clip_image_embedding )
    # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    do_classifier_free_guidance = guidance_scale > 1.0
    # get unconditional embeddings for classifier free guidance
    if do_classifier_free_guidance:
        max_length = content_text_input.input_ids.shape[-1]
        uncond_input = self.tokenizer([""""""] , padding="""max_length""" , max_length=max_length , return_tensors="""pt""" )
        uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
        # duplicate unconditional embeddings for each generation per prompt
        uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size , dim=0 )
        # For classifier free guidance, we need to do two forward passes.
        # Here we concatenate the unconditional and text embeddings into a single batch
        # to avoid doing two forward passes
        text_embeddings = torch.cat([uncond_embeddings, text_embeddings] )
    # get the initial random noise unless the user supplied it
    # Unlike in other pipelines, latents need to be generated in the target device
    # for 1-to-1 results reproducibility with the CompVis implementation.
    # However this currently doesn't work in `mps`.
    latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
    latents_dtype = text_embeddings.dtype
    if latents is None:
        if self.device.type == "mps":
            # randn does not work reproducibly on mps
            latents = torch.randn(latents_shape , generator=generator , device="""cpu""" , dtype=latents_dtype ).to(
                self.device )
        else:
            latents = torch.randn(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
    else:
        if latents.shape != latents_shape:
            raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
        latents = latents.to(self.device )
    # scale the initial noise by the standard deviation required by the scheduler
    latents = latents * self.scheduler.init_noise_sigma
    # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
    # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
    # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
    # and should be between [0, 1]
    accepts_eta = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
    extra_step_kwargs = {}
    if accepts_eta:
        extra_step_kwargs["""eta"""] = eta
    # check if the scheduler accepts generator
    accepts_generator = """generator""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
    if accepts_generator:
        extra_step_kwargs["""generator"""] = generator
    with self.progress_bar(total=num_inference_steps ):
        for i, t in enumerate(timesteps ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            # predict the noise residual
            noise_pred = self.unet(latent_model_input , t , encoder_hidden_states=text_embeddings ).sample
            # perform classifier free guidance
            if do_classifier_free_guidance:
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # perform clip guidance
            if clip_guidance_scale > 0:
                text_embeddings_for_guidance = (
                    text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
                )
                noise_pred , latents = self.cond_fn(
                    latents , t , i , text_embeddings_for_guidance , noise_pred , clip_image_embeddings , clip_guidance_scale , )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_step_kwargs ).prev_sample
    # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
    latents = 1 / 0.1_8215 * latents
    image = self.vae.decode(latents ).sample
    image = (image / 2 + 0.5).clamp(0 , 1 )
    image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
    if output_type == "pil":
        image = self.numpy_to_pil(image )
    if not return_dict:
        return (image, None)
    return StableDiffusionPipelineOutput(images=image , nsfw_content_detected=None )
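# Hedged usage sketch for the pipeline above (component wiring and inputs are
# illustrative assumptions, not a tested recipe): build it from a Stable Diffusion
# checkpoint's vae/text_encoder/tokenizer/unet/scheduler plus a CLIP model and
# feature extractor, then blend two images:
#   result = pipe(content_image , style_image , content_prompt="""a photo""" , style_prompt="""a painting""" )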
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class __A( SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10
def get_scheduler_config (self , **SCREAMING_SNAKE_CASE_ ):
config = {
"""num_train_timesteps""": 2_01,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
config.update(**SCREAMING_SNAKE_CASE_ )
return config
def test_step_shape (self ):
    num_inference_steps = 10
    scheduler_config = self.get_scheduler_config()
    scheduler = self.scheduler_classes[0](**scheduler_config )
    scheduler.set_timesteps(num_inference_steps )
    timestep_0 = scheduler.timesteps[0]
    timestep_1 = scheduler.timesteps[1]
    sample = self.dummy_sample
    residual = 0.1 * sample
    output_0 = scheduler.step(residual , timestep_0 , sample ).prev_sample
    output_1 = scheduler.step(residual , timestep_1 , sample ).prev_sample
    self.assertEqual(output_0.shape , sample.shape )
    self.assertEqual(output_0.shape , output_1.shape )
def test_timesteps (self ):
    for timesteps in [10, 50, 1_00, 10_00]:
        self.check_over_configs(num_train_timesteps=timesteps )
def test_clip_denoised (self ):
    for clip_denoised in [True, False]:
        self.check_over_configs(clip_denoised=clip_denoised )
def test_full_loop_no_noise_onestep (self ):
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config )
    num_inference_steps = 1
    scheduler.set_timesteps(num_inference_steps )
    timesteps = scheduler.timesteps
    generator = torch.manual_seed(0 )
    model = self.dummy_model()
    sample = self.dummy_sample_deter * scheduler.init_noise_sigma
    for i, t in enumerate(timesteps ):
        # 1. scale model input
        scaled_sample = scheduler.scale_model_input(sample , t )
        # 2. predict noise residual
        residual = model(scaled_sample , t )
        # 3. predict previous sample x_t-1
        pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
        sample = pred_prev_sample
    result_sum = torch.sum(torch.abs(sample ) )
    result_mean = torch.mean(torch.abs(sample ) )
    assert abs(result_sum.item() - 192.7614 ) < 1E-2
    assert abs(result_mean.item() - 0.2510 ) < 1E-3
def test_full_loop_no_noise_multistep (self ):
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config )
    timesteps = [1_06, 0]
    scheduler.set_timesteps(timesteps=timesteps )
    timesteps = scheduler.timesteps
    generator = torch.manual_seed(0 )
    model = self.dummy_model()
    sample = self.dummy_sample_deter * scheduler.init_noise_sigma
    for t in timesteps:
        # 1. scale model input
        scaled_sample = scheduler.scale_model_input(sample , t )
        # 2. predict noise residual
        residual = model(scaled_sample , t )
        # 3. predict previous sample x_t-1
        pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
        sample = pred_prev_sample
    result_sum = torch.sum(torch.abs(sample ) )
    result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 347.6357 ) < 1E-2
assert abs(result_mean.item() - 0.4527 ) < 1E-3
def test_custom_timesteps_increasing_order (self ):
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config )
    timesteps = [39, 30, 12, 15, 0]
    with self.assertRaises(ValueError , msg="""`timesteps` must be in descending order.""" ):
        scheduler.set_timesteps(timesteps=timesteps )
def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps (self ):
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config )
    timesteps = [39, 30, 12, 1, 0]
    num_inference_steps = len(timesteps )
    with self.assertRaises(ValueError , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ):
        scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
def test_custom_timesteps_too_large (self ):
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config )
    timesteps = [scheduler.config.num_train_timesteps]
    with self.assertRaises(
        ValueError , msg=F"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
        scheduler.set_timesteps(timesteps=timesteps )
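# These checks mirror the scheduler's set_timesteps contract: custom timesteps must
# be strictly descending, cannot be combined with num_inference_steps, and must stay
# below num_train_timesteps (201 in the config above).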
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '▁'
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class __UpperCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def setUp (self : Union[str, Any] ):
super().setUp()
tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def test_convert_token_and_id (self : Union[str, Any] ):
    token = """<s>"""
    token_id = 1
    self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
    self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def test_get_vocab (self : int ):
    vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
    self.assertEqual(vocab_keys[0] , """<unk>""" )
    self.assertEqual(vocab_keys[1] , """<s>""" )
    self.assertEqual(vocab_keys[-1] , """<pad>""" )
    self.assertEqual(len(vocab_keys ) , 1002 )
def test_vocab_size (self : str ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def test_full_tokenizer (self : Union[str, Any] ):
    tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
    tokens = tokenizer.tokenize("""This is a test""" )
    self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
    self.assertListEqual(
        tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
    tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
    self.assertListEqual(
        tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
ids = tokenizer.convert_tokens_to_ids(tokens )
self.assertListEqual(
ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
back_tokens = tokenizer.convert_ids_to_tokens(ids )
self.assertListEqual(
back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def big_tokenizer (self : str ):
return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
@slow
def test_tokenization_base_easy_symbols (self : int ):
    text = """Hello World!"""
    expected_ids = [1_8536, 2260, 101]
    self.assertListEqual(expected_ids , self.big_tokenizer.encode(text ) )
@slow
def test_tokenization_base_hard_symbols (self : Dict ):
text = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
expected_ids = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
self.assertListEqual(expected_ids , self.big_tokenizer.encode(text ) )
@require_torch
@slow
def test_torch_encode_plus_sent_to_model (self : Tuple ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
sequence = """ """.join(tokens )
encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors="""pt""" , return_token_type_ids=False )
batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=False )
config = BertGenerationConfig()
model = BertGenerationEncoder(config )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
    model(**encoded_sequence )
    model(**batch_encoded_sequence )
@slow
def test_tokenizer_integration (self : List[str] ):
# fmt: off
A = {"""input_ids""": [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
'''simple docstring'''
import math
class __UpperCAmelCase :
    '''simple docstring'''
    def __init__(self , n=0 ): # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ] # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ] # dp[i][j] stores minimum distance from i to j
    def add_edge (self , u , v , w ):
        self.dp[u][v] = w
    def floyd_warshall (self ):
        for k in range(0 , self.n ):
            for i in range(0 , self.n ):
                for j in range(0 , self.n ):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
    def show_min (self , u , v ):
        return self.dp[u][v]
if __name__ == "__main__":
graph = __UpperCAmelCase(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
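# Expected results for the sample graph above: show_min(1, 4) follows 1 -> 3 -> 4
# (5 + 6 = 11) and show_min(0, 3) follows 0 -> 2 -> 3 (9 + 7 = 16); note both calls
# return their values rather than printing them.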
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
_snake_case = TypeVar("T")
class Node( Generic[T] ):
    def __init__( self , data ) -> None:
        self.data = data
        self.next : Node[T] | None = None
    def __str__( self ) -> str:
        return F'''{self.data}'''
class Stack( Generic[T] ):
    def __init__( self ) -> None:
        self.top : Node[T] | None = None
    def __iter__( self ) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next
    def __str__( self ) -> str:
        return "->".join([str(item ) for item in self] )
    def __len__( self ) -> int:
        return len(tuple(iter(self ) ) )
    def is_empty( self ) -> bool:
        return self.top is None
    def push( self , item ) -> None:
        node = Node(item )
        if not self.is_empty():
            node.next = self.top
        self.top = node
    def pop( self ) -> T:
        if self.is_empty():
            raise IndexError("""pop from empty stack""" )
        assert isinstance(self.top , Node )
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data
    def peek( self ) -> T:
        if self.is_empty():
            raise IndexError("""peek from empty stack""" )
        assert self.top is not None
        return self.top.data
    def clear( self ) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
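# Quick usage sketch for the stack above:
#   s = Stack()
#   s.push(1); s.push(2)
#   assert str(s ) == "2->1" and s.pop() == 2 and s.peek() == 1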
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
@slow
def test_output_embeds_base_model ( self ) -> Any:
    model = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
    input_ids = tf.convert_to_tensor(
        [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.int32 , ) # J'aime le camembert !"
    output = model(input_ids )["""last_hidden_state"""]
    expected_shape = tf.TensorShape((1, 10, 768) )
    self.assertEqual(output.shape , expected_shape )
    # compare the actual values for a slice.
    expected_slice = tf.convert_to_tensor(
        [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.float32 , )
    # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
    # camembert.eval()
    # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
    self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
import math
def lowerCamelCase_ ( initial_intensity , angle ):
    """simple docstring"""
    if initial_intensity < 0:
        raise ValueError('''The value of intensity cannot be negative''' )
        # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''' )
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
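# Worked example: with initial_intensity=100.0 and angle=60, Malus's law gives
# 100 * cos²(60°) = 100 * 0.25 = 25.0.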
def lowerCamelCase_ ( _a = 4_000_000 ):
"""simple docstring"""
    even_fibs = []
    a , b = 0, 1
    while b <= _a:
        if b % 2 == 0:
            even_fibs.append(b )
        a , b = b, a + b
    return sum(even_fibs )
if __name__ == "__main__":
print(f'''{solution() = }''')
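# Worked example: the even Fibonacci numbers not exceeding 100 are 2, 8 and 34,
# so solution(100) returns 44.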
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __lowercase (ABC ):
    """simple docstring"""
    @staticmethod
    @abstractmethod
    def register_subcommand ( parser ):
        raise NotImplementedError()
    @abstractmethod
    def run ( self ):
        raise NotImplementedError()
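# Minimal sketch of a concrete command (illustrative only; the real subclasses such
# as EnvironmentCommand live in transformers.commands.*):
#   class EchoCommand(__lowercase ):
#       @staticmethod
#       def register_subcommand(parser ):
#           parser.add_parser("""echo""" )
#       def run(self ):
#           print("""echo""" )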
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : int = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __lowercase (PretrainedConfig ):
"""simple docstring"""
model_type = """segformer"""
def __init__( self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[3_2, 6_4, 1_6_0, 2_5_6] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , classifier_dropout_prob=0.1 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1e-6 , decoder_hidden_size=2_5_6 , semantic_loss_ignore_index=2_5_5 , **kwargs , ) -> None:
    super().__init__(**kwargs )
    if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
        warnings.warn(
            """Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
            """ removed, as the behaviour will default to that of reshape_last_stage = True.""" , FutureWarning , )
    self.num_channels = num_channels
    self.num_encoder_blocks = num_encoder_blocks
    self.depths = depths
    self.sr_ratios = sr_ratios
    self.hidden_sizes = hidden_sizes
    self.patch_sizes = patch_sizes
    self.strides = strides
    self.mlp_ratios = mlp_ratios
    self.num_attention_heads = num_attention_heads
    self.hidden_act = hidden_act
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.classifier_dropout_prob = classifier_dropout_prob
    self.initializer_range = initializer_range
    self.drop_path_rate = drop_path_rate
    self.layer_norm_eps = layer_norm_eps
    self.decoder_hidden_size = decoder_hidden_size
    self.reshape_last_stage = kwargs.get("""reshape_last_stage""" , True )
    self.semantic_loss_ignore_index = semantic_loss_ignore_index
class __lowercase (OnnxConfig ):
"""simple docstring"""
torch_onnx_minimum_version = version.parse("""1.11""" )
@property
def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def UpperCAmelCase ( self ) -> float:
return 1e-4
@property
def UpperCAmelCase ( self ) -> int:
return 1_2
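# Note: the first class above is the SegFormer model configuration (model_type
# "segformer"); the second is its ONNX export spec, declaring one dynamic-shaped
# `pixel_values` input, a validation tolerance of 1e-4 and a default opset of 12.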
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/nllb-200-distilled-600M': 1_0_2_4,
}
# fmt: off
A : int = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class A ( PreTrainedTokenizer ):
'''simple docstring'''
vocab_files_names = VOCAB_FILES_NAMES
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
model_input_names = ['''input_ids''', '''attention_mask''']
prefix_tokens = []
suffix_tokens = []
def __init__(self : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any]="<s>" , _UpperCAmelCase : int="</s>" , _UpperCAmelCase : Optional[Any]="</s>" , _UpperCAmelCase : List[str]="<s>" , _UpperCAmelCase : Union[str, Any]="<unk>" , _UpperCAmelCase : Any="<pad>" , _UpperCAmelCase : Dict="<mask>" , _UpperCAmelCase : str=None , _UpperCAmelCase : str=None , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Optional[Dict[str, Any]] = None , _UpperCAmelCase : Dict=None , _UpperCAmelCase : Any=False , **_UpperCAmelCase : Union[str, Any] , ) -> int:
"""simple docstring"""
lowercase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
lowercase__ = legacy_behaviour
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , src_lang=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=_UpperCAmelCase , **_UpperCAmelCase , )
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCAmelCase ) )
lowercase__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
lowercase__ = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase__ = 1
lowercase__ = len(self.sp_model )
lowercase__ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_UpperCAmelCase )
}
lowercase__ = {v: k for k, v in self.lang_code_to_id.items()}
lowercase__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowercase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowercase__ = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
lowercase__ = src_lang if src_lang is not None else """eng_Latn"""
lowercase__ = self.lang_code_to_id[self._src_lang]
lowercase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__(self : Any ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.__dict__.copy()
lowercase__ = None
lowercase__ = self.sp_model.serialized_model_proto()
return state
def __setstate__(self : int , _UpperCAmelCase : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase__ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def lowerCamelCase__ (self : Optional[Any] ) -> str:
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowerCamelCase__ (self : List[str] ) -> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def lowerCamelCase__ (self : Union[str, Any] , _UpperCAmelCase : str ) -> None:
"""simple docstring"""
lowercase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
lowercase__ = [1] * len(self.prefix_tokens )
lowercase__ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones
def lowerCamelCase__ (self : List[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCamelCase__ (self : Dict , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] , _UpperCAmelCase : Optional[str] , **_UpperCAmelCase : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
lowercase__ = src_lang
lowercase__ = self(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
lowercase__ = self.convert_tokens_to_ids(_UpperCAmelCase )
lowercase__ = tgt_lang_id
return inputs
def lowerCamelCase__ (self : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase__ = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase__ (self : List[Any] , _UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : Dict ) -> str:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase__ = self.sp_model.PieceToId(_UpperCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : Optional[int] ) -> Any:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : List[str] ) -> int:
"""simple docstring"""
out_string = """""".join(_UpperCAmelCase ).replace(SPIECE_UNDERLINE , """ """ ).strip()
return out_string
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase__ = os.path.join(
_UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase , """wb""" ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (out_vocab_file,)
def lowerCamelCase__ (self : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : str = "eng_Latn" , _UpperCAmelCase : Optional[List[str]] = None , _UpperCAmelCase : str = "fra_Latn" , **_UpperCAmelCase : Optional[int] , ) -> BatchEncoding:
"""simple docstring"""
lowercase__ = src_lang
lowercase__ = tgt_lang
return super().prepare_seqaseq_batch(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
def lowerCamelCase__ (self : Union[str, Any] ) -> Any:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCamelCase__ (self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCamelCase__ (self : str , _UpperCAmelCase : List[Any] ) -> None:
"""simple docstring"""
lowercase__ = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
lowercase__ = []
lowercase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowercase__ = [self.cur_lang_code]
lowercase__ = [self.eos_token_id]
def lowerCamelCase__ (self : str , _UpperCAmelCase : str ) -> None:
"""simple docstring"""
lowercase__ = self.lang_code_to_id[lang]
if self.legacy_behaviour:
lowercase__ = []
lowercase__ = [self.eos_token_id, self.cur_lang_code]
else:
lowercase__ = [self.cur_lang_code]
lowercase__ = [self.eos_token_id]
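# Hedged usage sketch (the model id comes from the vocab map above; the call shape
# is illustrative): load with from_pretrained, set src_lang/tgt_lang such as
# "eng_Latn"/"fra_Latn", then call the tokenizer; the language code and <eos> are
# attached around the ids by set_src_lang_special_tokens above.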
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=3_8_4,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=1_2_8,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=2_0,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=3_0,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=4_2, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='Number of workers to use for preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
args = parser.parse_args()
if args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = 'temp_engine/bert-fp32.engine'
if args.fp16:
    engine_name = 'temp_engine/bert-fp16.engine'
if args.int8:
    engine_name = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
input_names = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
config.max_workspace_size = 1 << 5_0
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fp16:
    config.set_flag(trt.BuilderFlag.FP16)
if args.int8:
    config.set_flag(trt.BuilderFlag.INT8)
profile = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    """Run one TensorRT inference pass; returns (start_logits, end_logits) and the raw inference time."""
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle)
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time
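# Added illustration (a sketch, never called by this script): the *_async copies in
# model_infer only work because the host buffers are page-locked (pinned). This
# hypothetical helper shows the same copy pattern in isolation, assuming pycuda is
# imported as `cuda` and numpy as `np`, as elsewhere in this file.
def _pinned_copy_demo(n_floats=256):
    demo_host = cuda.pagelocked_empty((n_floats,), dtype=np.float32)  # pinned host buffer
    demo_dev = cuda.mem_alloc(demo_host.nbytes)                       # raw device allocation
    demo_stream = cuda.Stream()
    cuda.memcpy_htod_async(demo_dev, demo_host, demo_stream)  # host -> device, queued on the stream
    cuda.memcpy_dtoh_async(demo_host, demo_dev, demo_stream)  # device -> host, same stream
    demo_stream.synchronize()  # block until both copies have finished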
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
column_names = raw_datasets['validation'].column_names
question_column_name = 'question' if 'question' in column_names else column_names[0]
context_column_name = 'context' if 'context' in column_names else column_names[1]
answer_column_name = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    """Tokenize validation examples into model features, keeping example ids and offset mappings."""
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []
    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]
    return tokenized_examples
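# Worked illustration (hand-written, not executed): with max_seq_length=8 and
# stride=2, a 10-token context produces two overlapping features, roughly tokens
# [0..7] and [6..9], and "overflow_to_sample_mapping" maps both features back to
# the same original example index so predictions can be regrouped per example.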
eval_examples = raw_datasets['validation']
# Validation Feature Creation
eval_dataset = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
eval_dataloader = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    """Post-process raw start/end logits into answer strings in the format the metric expects."""
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        """Number of bytes needed for one binding, given the current binding shape and dtype."""
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
# Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffers (start logits and end logits are bindings 3 and 4)
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(F' Num examples = {len(eval_dataset)}')
logger.info(F' Batch size = {args.per_device_eval_batch_size}')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_0_0_0 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_0_0_0))
logger.info('Total Number of Inference = %d', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F'Evaluation metrics: {eval_metric}')
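    # Input format sketch (illustrative values) for metric.compute above:
    #     predictions = [{'id': '56be4db0acb8001400a502ec', 'prediction_text': 'Denver Broncos'}]
    #     references = [{'id': '56be4db0acb8001400a502ec', 'answers': {'text': ['Denver Broncos'], 'answer_start': [177]}}]
    # which yields {'exact_match': 100.0, 'f1': 100.0} for the 'squad' metric.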
| 146
| 1
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCamelCase__ = {"""tokenization_wav2vec2_phoneme""": ["""Wav2Vec2PhonemeCTCTokenizer"""]}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
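# Conceptual sketch (not the real transformers implementation) of what the lazy
# module above achieves: the submodule import is deferred until an attribute
# listed in _import_structure is first accessed.
#
#     import importlib, types
#
#     class _TinyLazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure
#         def __getattr__(self, attr):
#             for submodule, names in self._import_structure.items():
#                 if attr in names:
#                     real = importlib.import_module(f"{self.__name__}.{submodule}")
#                     return getattr(real, attr)
#             raise AttributeError(attr)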
| 92
|
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a model 50 times smaller than this, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
snake_case : List[str] = "facebook/wmt19-en-de"
snake_case : Dict = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
snake_case : List[str] = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(F"num of params {tiny_model.num_parameters()}")
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
snake_case : Dict = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-de
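# Optional sanity check (an added sketch, not part of the original recipe):
# reload the tiny model from disk and confirm the forward pass still works.
def _check_tiny_roundtrip(path=mname_tiny):
    reloaded_tok = FSMTTokenizer.from_pretrained(path)
    reloaded_model = FSMTForConditionalGeneration.from_pretrained(path)
    out = reloaded_model(**reloaded_tok(["round trip"], return_tensors="pt"))
    assert out.logits.ndim == 3  # (batch, sequence_length, vocab_size)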
| 281
| 0
|
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length from letters, digits and punctuation."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))
def alternative_password_generator(chars_incl: str, i: int) -> str:
    """Generate a password of length i that is guaranteed to contain the characters in chars_incl."""
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)
# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))
def random_number(chars_incl: str, i: int) -> str:
    pass  # Put your code here...
def random_letters(chars_incl: str, i: int) -> str:
    pass  # Put your code here...
def random_characters(chars_incl: str, i: int) -> str:
    pass  # Put your code here...
def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
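# Hand-checked examples: a password needs all four character classes to pass.
assert is_strong_password("Aa1!Aa1!") is True   # upper, lower, digit, special
assert is_strong_password("12345678") is False  # digits only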
def main() -> None:
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print("Alternative Password generated:", alternative_password_generator(chars_incl, length))
    print("[If you are thinking of using this password, you had better save it.]")
if __name__ == "__main__":
main()
| 28
|
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.speecht5')
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]
    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f'upsamples.{i}.1.weight_g']
        hf_model.upsampler[i].weight_v.data = checkpoint[f'upsamples.{i}.1.weight_v']
        hf_model.upsampler[i].bias.data = checkpoint[f'upsamples.{i}.1.bias']
    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_g']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_v']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f'blocks.{i}.convs1.{j}.1.bias']
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_g']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_v']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f'blocks.{i}.convs2.{j}.1.bias']
    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()
    model = SpeechT5HifiGan(config)
    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 28
| 1
|
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '▁'
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<unk>')
        self.assertEqual(vocab_keys[1], '<s>')
        self.assertEqual(vocab_keys[-1], '<pad>')
        self.assertEqual(len(vocab_keys), 1002)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382], )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder')
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [18536, 2260, 101]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = ' '.join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors='pt', return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + ' ' + sequence], return_tensors='pt', return_token_type_ids=False)

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
    def test_tokenizer_integration(self):
# fmt: off
lowercase : Optional[int] = {'''input_ids''': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase, model_name='google/bert_for_seq_generation_L-24_bbc_encoder', revision='c817d1fd1be2ffa69431227a1fe320544943d4db', )
| 337
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__a = logging.get_logger(__name__)
__a = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__a = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
__a = {'''facebook/blenderbot-3B''': 1_28}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'])
            if "cls" in state:
                state['cls'] = tuple(state['cls'])
            changes_to_apply = False
            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)
    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. includes the space before it, so we set lstrip to True.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(' ' + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)
        full_string = ' '.join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""")
        return input_ids
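# Usage sketch (facebook/blenderbot-3B is the checkpoint referenced in the maps above):
#
#     from transformers import BlenderbotTokenizerFast
#     tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#     ids = tok(" My friends are cool but they eat too many carbs.")["input_ids"]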
| 337
| 1
|
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : int = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase_ : Union[str, Any] = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
UpperCAmelCase_ : str = {'facebook/blenderbot_small-90M': 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (represented as a sequence of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
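# Hand-checked example: get_pairs("hello") returns
# {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')} - each adjacent symbol pair
# is a candidate BPE merge.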
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__", unk_token="__unk__", pad_token="__null__", **kwargs):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub('([.,!?()])', r' \1', token)
        token = re.sub("(')", r' \1 ', token)
        token = re.sub(r'\s{2,}', ' ', token)
        if "\n" in token:
            token = token.replace('\n', ' __newln__')

        tokens = token.split(' ')
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + '</w>'])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = '@@ '.join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r'\S+\n?', text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = ' '.join(tokens).replace('@@ ', '').strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])

        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')

        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file
| 120
|
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """Solve for whichever of force, area or distance is passed as 0, using the Casimir effect formula."""
    if (force, area, distance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if force < 0:
        raise ValueError('Magnitude of force can not be negative')
    if distance < 0:
        raise ValueError('Distance can not be negative')
    if area < 0:
        raise ValueError('Area can not be negative')
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError('One and only one argument must be 0')
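# Example usage (illustrative numbers): casimir_force(force=0, area=4.0,
# distance=1e-6) solves for the force and returns {'force': ...}; exactly one
# of the three arguments must be passed as 0.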
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 120
| 1
|
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = 'src/transformers'

# Matches is_xxx_available()
_re_backend = re.compile(r'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
_re_try = re.compile(r'^\s*try:')
# Catches a line with else:
_re_else = re.compile(r'^\s*else:')
def find_backend(line):
    """Find one (or multiple) backend in a code line of an init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Read an init file and parse (per backend) the _import_structure objects and the TYPE_CHECKING objects defined."""
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"""Duplicate _import_structure definitions for: {duplicate_imports}""")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"""{key} backend"""
            errors.append(f"""Differences for {name}:""")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"""  {a} in TYPE_HINT but not in _import_structure.""")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"""  {a} in _import_structure but not in TYPE_HINT.""")
    return errors
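# Hand-checked example: analyze_results({"none": ["A", "A"]}, {"none": ["A"]})
# reports the duplicate _import_structure definition for "A"; the de-duplicated
# sets still match, so no per-object difference lines are emitted.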
def check_all_inits():
    """Check all inits in the repo and raise an error if one defines different objects in its two halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    'convert_pytorch_checkpoint_to_tf2',
    'modeling_flax_pytorch_utils',
]
def check_submodules():
    """Check that every submodule is registered in the main init of Transformers."""
    spec = importlib.util.spec_from_file_location(
        "transformers", os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), submodule_search_locations=[PATH_TO_TRANSFORMERS], )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"""- {module}""" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"""{list_of_modules}\n"""
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 108
|
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    """Language generation pipeline: predicts the words that follow a given prompt."""

    # Prefix text to give XLNet and Transformer-XL more context for short prompts.
    XL_PREFIX = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING)
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(self, return_full_text=None, return_tensors=None, return_text=None, return_type=None, clean_up_tokenization_spaces=None, prefix=None, handle_long_generation=None, stop_sequence=None, **generate_kwargs, ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params['prefix'] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework)
            generate_kwargs['prefix_length'] = prefix_inputs['input_ids'].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
                    ''' [None, \'hole\']''')
            preprocess_params['handle_long_generation'] = handle_long_generation
        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError('`return_text` is mutually exclusive with `return_full_text`')
            if return_tensors is not None:
                raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`')
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError('`return_text` is mutually exclusive with `return_tensors`')
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params['return_type'] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params['clean_up_tokenization_spaces'] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
                    ' the stop sequence will be used as the stop sequence string in the interim.')
            generate_kwargs['eos_token_id'] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        """Parse arguments and tokenize, adding model-specific quirks where needed."""
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({'add_space_before_punct_symbol': True})
        return super()._parse_and_tokenize(*args, **kwargs)
    def __call__(self, text_inputs, **kwargs):
        """Complete the prompt(s) given as inputs."""
        return super().__call__(text_inputs, **kwargs)
    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework)
        inputs['prompt_text'] = prompt_text
        if handle_long_generation == "hole":
            cur_len = inputs['input_ids'].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs['max_new_tokens']
            else:
                new_tokens = generate_kwargs.get('max_length', self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError('We cannot infer how many new tokens are expected')
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
                        ' models max length')

                inputs['input_ids'] = inputs['input_ids'][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs['attention_mask'] = inputs['attention_mask'][:, -keep_length:]
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs['input_ids']
        attention_mask = model_inputs.get('attention_mask', None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop('prompt_text')
        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop('prefix_length', 0)
        if prefix_length > 0:
            has_max_new_tokens = 'max_new_tokens' in generate_kwargs or (
                'generation_config' in generate_kwargs
                and generate_kwargs['generation_config'].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get('max_length') or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = 'min_new_tokens' in generate_kwargs or (
                'generation_config' in generate_kwargs
                and generate_kwargs['generation_config'].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
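

# A minimal usage sketch for the pipeline implemented above. Hedged: the
# "text-generation" task string and the "gpt2" checkpoint are illustrative
# assumptions; any causal LM checkpoint should work the same way.
if __name__ == "__main__":
    from transformers import pipeline

    generator = pipeline("text-generation", model="gpt2")
    # `max_new_tokens` bounds the continuation; `return_full_text=False`
    # would return only the newly generated text.
    print(generator("Hello, I'm a language model,", max_new_tokens=20)[0]["generated_text"])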
from __future__ import annotations
def shear_stress(
    stress: float,
    tangential_force: float,
    area: float,
) -> tuple[str, float]:
    """
    Solve for whichever of shear stress, tangential force or cross-sectional
    area is passed as 0, using the relation stress = tangential_force / area.
    Returns the solved quantity's name together with its value.
    """
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif stress < 0:
raise ValueError('''Stress cannot be negative''' )
elif tangential_force < 0:
raise ValueError('''Tangential Force cannot be negative''' )
elif area < 0:
raise ValueError('''Area cannot be negative''' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
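

# Worked examples (hedged: the numeric values are chosen for illustration):
#   shear_stress(stress=25, tangential_force=100, area=0) -> ("area", 4.0)
#   shear_stress(stress=0, tangential_force=1600, area=200) -> ("stress", 8.0)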
if __name__ == "__main__":
import doctest
doctest.testmod()
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # reduce TensorFlow's console logging before it is imported below
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_DECODE_TYPES = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
        """Decode (char, bpe, wp) logits into strings, keeping the highest-scoring decode per sample."""
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores
    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
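

# A minimal usage sketch (hedged: the checkpoint name is illustrative, and the
# model class `MgpstrForSceneTextRecognition` is assumed to be importable):
#
#     from transformers import MgpstrForSceneTextRecognition
#
#     processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#     model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
#     pixel_values = processor(images=image, return_tensors="pt").pixel_values
#     outputs = model(pixel_values)
#     text = processor.batch_decode(outputs.logits)["generated_text"]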
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model: torch.nn.Module,
    bnb_quantization_config: BnbQuantizationConfig,
    weights_location: Union[str, os.PathLike] = None,
    device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,
    no_split_module_classes: Optional[List[str]] = None,
    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
    offload_folder: Optional[Union[str, os.PathLike]] = None,
    offload_state_dict: bool = False,
):
    """Quantize a model with bitsandbytes (in place, or from `weights_location`) and dispatch it on the available devices."""
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            " We move the model to cuda."
        )
        return model

    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )

    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
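

# A minimal usage sketch of `load_and_quantize_model` (hedged: the model class
# and checkpoint path are illustrative assumptions):
#
#     from accelerate import init_empty_weights
#     from accelerate.utils import BnbQuantizationConfig
#
#     with init_empty_weights():
#         empty_model = MyModel()  # hypothetical model class
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#     quantized_model = load_and_quantize_model(
#         empty_model,
#         bnb_quantization_config=bnb_config,
#         weights_location="path/to/checkpoint",  # folder containing the weights
#         device_map="auto",
#     )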
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
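

# For reference, a resolved `device_map` is a plain dict mapping module names to
# devices; a hedged illustration (the module names are hypothetical):
#     {"transformer.wte": 0, "transformer.h": 0, "lm_head": "cpu"}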
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """Replace every `nn.Linear` in `model` (except skipped modules) with a bitsandbytes quantized linear layer."""
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    """Return the module names (e.g. tied weights and the output head) that should stay in full precision."""
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
def has_4bit_bnb_layers(model):
    """Check if the model contains any `bnb.nn.Linear4bit` layers."""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter: nn.Module):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized yet, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        # We split the modules to get the right module for the parameter
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 370
|
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))

    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class AcceleratorTester(AccelerateTestCase):
    @require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        _accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)
    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()
    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()

        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)
    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()
        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)
    def test_env_var_device(self):
        """Tests that setting the torch device with ACCELERATE_TORCH_DEVICE overrides the default."""
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch('torch.cuda.set_device', noop), patch_environment(ACCELERATE_TORCH_DEVICE='cuda:64'):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), 'cuda:64')
    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1E-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1E-3)
    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {'class_name': models[0].__class__.__name__}

            with open(os.path.join(output_dir, 'data.json'), 'w') as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, 'data.json'), 'r') as f:
                config = json.load(f)

            models[0].class_name = config['class_name']

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1E-3)

            # random class name to verify correct one is loaded
            model.class_name = 'random'

            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1E-3)

            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1E-3)

            # random class name to verify correct one is loaded
            model.class_name = 'random'

            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1E-3)

            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)
    def test_accelerator_none(self):
        """Just test that passing None to accelerator.prepare() works."""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertTrue(dummy_obj is None)
    def test_is_accelerator_prepared(self):
        """Checks that `_is_accelerate_prepared` is set properly on prepared objects."""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertEqual(
            getattr(dummy_obj, '_is_accelerate_prepared', False),
            False,
            'Dummy object should have `_is_accelerate_prepared` set to `True`',
        )
        self.assertEqual(
            getattr(model, '_is_accelerate_prepared', False),
            True,
            'Model is missing `_is_accelerator_prepared` or is set to `False`',
        )
        self.assertEqual(
            getattr(optimizer, '_is_accelerate_prepared', False),
            True,
            'Optimizer is missing `_is_accelerator_prepared` or is set to `False`',
        )
        self.assertEqual(
            getattr(scheduler, '_is_accelerate_prepared', False),
            True,
            'Scheduler is missing `_is_accelerator_prepared` or is set to `False`',
        )
        self.assertEqual(
            getattr(train_dl, '_is_accelerate_prepared', False),
            True,
            'Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`',
        )
        self.assertEqual(
            getattr(valid_dl, '_is_accelerate_prepared', False),
            True,
            'Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`',
        )
    @slow
    @require_bnb
    def test_accelerator_bnb(self):
        """Tests that the accelerator can be used with an 8-bit model."""
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            'EleutherAI/gpt-neo-125m',
            load_in_8bit=True,
            device_map={'': 0},
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)
    @slow
    @require_bnb
    def test_accelerator_bnb_cpu_error(self):
        """Tests that preparing an 8-bit model dispatched between cpu and gpu raises an error."""
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                'EleutherAI/gpt-neo-125m',
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map['lm_head'] = 'cpu'
        model = AutoModelForCausalLM.from_pretrained(
            'EleutherAI/gpt-neo-125m', device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True
        )

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)
    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        """Tests that preparing an 8-bit model split across GPUs under a distributed setup raises an error."""
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {'distributed_type': DistributedType.MULTI_GPU}

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                'EleutherAI/gpt-neo-125m',
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map['lm_head'] = 1
        model = AutoModelForCausalLM.from_pretrained(
            'EleutherAI/gpt-neo-125m',
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)

        PartialState._reset_state()
    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu_no_distributed(self):
        """Tests that an 8-bit model split across GPUs can be prepared outside a distributed setup."""
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                'EleutherAI/gpt-neo-125m',
            )
        device_map = infer_auto_device_map(model)
        device_map['lm_head'] = 1
        model = AutoModelForCausalLM.from_pretrained(
            'EleutherAI/gpt-neo-125m',
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)
    @require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        sgd = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(sgd)
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
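

# Quick sanity check (hedged, values chosen for illustration): identical unit
# vectors have cosine similarity 1.0, so
#     jax_cosine_distance(jnp.array([[1.0, 0.0]]), jnp.array([[1.0, 0.0]]))
# returns [[1.]].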
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param('concept_embeds', jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            'special_care_embeds', jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param('concept_embeds_weights', jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param('special_care_embeds_weights', jax.nn.initializers.ones, (3,))
    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = 'clip_input'
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {'params': params_rng, 'dropout': dropout_rng}

        random_params = self.module.init(rngs, clip_input)['params']

        return random_params

    def __call__(
        self,
        clip_input,
        params: dict = None,
    ):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {'params': params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
SCREAMING_SNAKE_CASE__ = """0.18.2"""
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
    )
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
    from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
        OnnxStableDiffusionInpaintPipeline,
        OnnxStableDiffusionInpaintPipelineLegacy,
        OnnxStableDiffusionPipeline,
        OnnxStableDiffusionUpscalePipeline,
        StableDiffusionOnnxPipeline,
    )
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
    from .models.controlnet_flax import FlaxControlNetModel
    from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .models.vae_flax import FlaxAutoencoderKL
    from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
    from .pipelines import (
        FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
        FlaxStableDiffusionInpaintPipeline,
        FlaxStableDiffusionPipeline,
    )
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the affinely transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the affinely transformed distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation of the affinely transformed distribution."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]

        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class __lowerCamelCase :
"""simple docstring"""
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
def __init__( self , UpperCAmelCase = 1 ) -> None:
'''simple docstring'''
lowercase_ = dim
lowercase_ = {k: dim * self.args_dim[k] for k in self.args_dim}
def A__ ( self , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
if self.dim == 1:
return self.distribution_class(*UpperCAmelCase )
else:
return Independent(self.distribution_class(*UpperCAmelCase ) , 1 )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , ) -> Distribution:
'''simple docstring'''
lowercase_ = self._base_distribution(UpperCAmelCase )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(UpperCAmelCase , loc=UpperCAmelCase , scale=UpperCAmelCase , event_dim=self.event_dim )
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
return () if self.dim == 1 else (self.dim,)
@property
def A__ ( self ) -> int:
'''simple docstring'''
return len(self.event_shape )
@property
def A__ ( self ) -> float:
'''simple docstring'''
return 0.0
def A__ ( self , UpperCAmelCase ) -> nn.Module:
'''simple docstring'''
return ParameterProjection(
in_features=UpperCAmelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def A__ ( self , *UpperCAmelCase ) -> Any:
'''simple docstring'''
raise NotImplementedError()
@staticmethod
def A__ ( UpperCAmelCase ) -> torch.Tensor:
'''simple docstring'''
return (x + torch.sqrt(torch.square(UpperCAmelCase ) + 4.0 )) / 2.0
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = {"df": 1, "loc": 1, "scale": 1}
lowerCAmelCase__ = StudentT
@classmethod
def A__ ( cls , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowercase_ = cls.squareplus(UpperCAmelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
lowercase_ = 2.0 + cls.squareplus(UpperCAmelCase )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = {"loc": 1, "scale": 1}
lowerCAmelCase__ = Normal
@classmethod
def A__ ( cls , UpperCAmelCase , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = cls.squareplus(UpperCAmelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = {"total_count": 1, "logits": 1}
lowerCAmelCase__ = NegativeBinomial
@classmethod
def A__ ( cls , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = cls.squareplus(UpperCAmelCase )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def A__ ( self , UpperCAmelCase ) -> Distribution:
'''simple docstring'''
lowercase_ , lowercase_ = distr_args
if self.dim == 1:
return self.distribution_class(total_count=UpperCAmelCase , logits=UpperCAmelCase )
else:
return Independent(self.distribution_class(total_count=UpperCAmelCase , logits=UpperCAmelCase ) , 1 )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None ) -> Distribution:
'''simple docstring'''
lowercase_ , lowercase_ = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
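# Usage sketch (illustrative; the feature size and batch shape below are
# arbitrary): project network features onto Student-T parameters, then build
# a distribution to sample from or score against.
if __name__ == "__main__":
    output = StudentTOutput(dim=1)
    projection = output.get_parameter_projection(in_features=32)
    df, loc, scale = projection(torch.randn(8, 24, 32))  # each of shape (8, 24)
    distr = output.distribution((df, loc, scale))
    print(distr.sample().shape)  # torch.Size([8, 24])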
| 297
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
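# `_LazyModule` keeps package imports cheap: the module object in `sys.modules`
# is replaced by a proxy that only imports a submodule the first time one of
# its exported names is accessed. A minimal sketch of the idea (simplified
# semantics; NOT the actual `transformers.utils` implementation):
#
#   import importlib, types
#
#   class LazyModuleSketch(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._structure = import_structure
#       def __getattr__(self, attr):
#           # find the submodule exporting `attr`, import it, fetch the symbol
#           for submodule, symbols in self._structure.items():
#               if attr in symbols:
#                   module = importlib.import_module("." + submodule, self.__name__)
#                   return getattr(module, attr)
#           raise AttributeError(attr)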
| 120
|
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
lowercase = (IPNDMScheduler,)
lowercase = (('num_inference_steps', 50),)
def __lowercase ( self : Optional[Any] , **lowerCamelCase : int ) -> Tuple:
        config = {"num_train_timesteps": 10_00}
config.update(**lowerCamelCase )
return config
    def __lowercase ( self : Dict , time_step : int=0 , **lowerCamelCase : Union[str, Any] ) -> Optional[Any]:
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
lowerCAmelCase_ : List[str] = self.dummy_sample
lowerCAmelCase_ : Optional[int] = 0.1 * sample
lowerCAmelCase_ : int = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase_ : List[str] = self.get_scheduler_config(**lowerCamelCase )
lowerCAmelCase_ : int = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(lowerCamelCase )
# copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
if time_step is None:
lowerCAmelCase_ : Optional[Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase )
lowerCAmelCase_ : List[Any] = scheduler_class.from_pretrained(lowerCamelCase )
new_scheduler.set_timesteps(lowerCamelCase )
# copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
lowerCAmelCase_ : int = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
lowerCAmelCase_ : Dict = new_scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowerCAmelCase_ : List[str] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
lowerCAmelCase_ : Any = new_scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowercase ( self : str ) -> Tuple:
pass
    def __lowercase ( self : Tuple , time_step : int=0 , **lowerCamelCase : Any ) -> int:
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
lowerCAmelCase_ : Tuple = self.dummy_sample
lowerCAmelCase_ : Optional[int] = 0.1 * sample
lowerCAmelCase_ : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase_ : Optional[int] = self.get_scheduler_config()
lowerCAmelCase_ : Optional[int] = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(lowerCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
if time_step is None:
lowerCAmelCase_ : Optional[Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase )
lowerCAmelCase_ : Optional[Any] = scheduler_class.from_pretrained(lowerCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase )
# copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
lowerCAmelCase_ : Any = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
lowerCAmelCase_ : Optional[Any] = new_scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowerCAmelCase_ : List[str] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
lowerCAmelCase_ : str = new_scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __lowercase ( self : Optional[int] , **lowerCamelCase : Optional[Any] ) -> str:
lowerCAmelCase_ : Optional[int] = self.scheduler_classes[0]
lowerCAmelCase_ : List[Any] = self.get_scheduler_config(**lowerCamelCase )
lowerCAmelCase_ : str = scheduler_class(**lowerCamelCase )
lowerCAmelCase_ : Dict = 10
lowerCAmelCase_ : Optional[int] = self.dummy_model()
lowerCAmelCase_ : Tuple = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase_ : Optional[Any] = model(lowerCamelCase , lowerCamelCase )
lowerCAmelCase_ : str = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase_ : Dict = model(lowerCamelCase , lowerCamelCase )
lowerCAmelCase_ : List[str] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase ).prev_sample
return sample
def __lowercase ( self : Tuple ) -> Dict:
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
for scheduler_class in self.scheduler_classes:
lowerCAmelCase_ : Tuple = self.get_scheduler_config()
lowerCAmelCase_ : List[str] = scheduler_class(**lowerCamelCase )
lowerCAmelCase_ : int = self.dummy_sample
lowerCAmelCase_ : Tuple = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCamelCase , """set_timesteps""" ):
scheduler.set_timesteps(lowerCamelCase )
elif num_inference_steps is not None and not hasattr(lowerCamelCase , """set_timesteps""" ):
                scheduler.num_inference_steps = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowerCAmelCase_ : int = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowerCAmelCase_ : List[str] = dummy_past_residuals[:]
lowerCAmelCase_ : List[str] = scheduler.timesteps[5]
lowerCAmelCase_ : str = scheduler.timesteps[6]
lowerCAmelCase_ : Dict = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
lowerCAmelCase_ : List[str] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
lowerCAmelCase_ : Any = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
lowerCAmelCase_ : Optional[int] = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __lowercase ( self : Any ) -> int:
for timesteps in [1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps , time_step=None )
def __lowercase ( self : Optional[int] ) -> Optional[int]:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=None )
def __lowercase ( self : int ) -> List[Any]:
lowerCAmelCase_ : List[Any] = self.full_loop()
lowerCAmelCase_ : Any = torch.mean(torch.abs(lowerCamelCase ) )
assert abs(result_mean.item() - 2_54_05_29 ) < 10
| 120
| 1
|
'''simple docstring'''
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}
class Direction(enum.Enum):
    UP = 0
    DOWN = 1
def forceWrite(content, end="") -> None:
    """Write to stdout and flush immediately so cursor moves render in order."""
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()
def writeColor(content, color, end="") -> None:
    """Write `content` wrapped in the ANSI color escape sequence for `color`."""
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)
def reset_cursor() -> None:
    """Return the cursor to the start of the current line."""
    forceWrite("\r")
def move_cursor(num_lines: int, direction: str) -> None:
    """Move the cursor `num_lines` in the given direction (UP/DOWN/RIGHT/LEFT)."""
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")
def clear_line() -> None:
    """Blank out the current line and return the cursor to its start."""
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()
def linebreak() -> None:
    """Draw a horizontal rule across the terminal."""
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
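# Quick demo (assumes an ANSI-capable terminal; "32" is the green color code):
if __name__ == "__main__":
    linebreak()
    forceWrite("\n")
    writeColor("hello", 32, end="\n")
    move_cursor(1, "UP")
    clear_line()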
| 6
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 6
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json',
'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json',
}
class LukeConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "luke"
    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 29
|
'''simple docstring'''
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """Return (best shift, its chi-squared statistic, decoded message)."""
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]
    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict
    if not case_sensitive:
        ciphertext = ciphertext.lower()
    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}
    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""
        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter
        chi_squared_statistic = 0.0
        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]
    most_likely_cipher: int = min(
        chi_squared_statistic_values, key=chi_squared_statistic_values_sorting_key)
    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
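# Worked example (illustrative): the shift whose decryption minimizes the
# chi-squared statistic against the English letter frequencies wins.
if __name__ == "__main__":
    shift, chi_squared, decoded = decrypt_caesar_with_chi_squared("crybd cdbsxq")
    print(f"shift={shift}, chi_squared={chi_squared:.2f}, decoded={decoded!r}")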
| 75
| 0
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=2 , lowerCAmelCase=9_9 , lowerCAmelCase=0 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase="last" , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=0 , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= seq_length
__lowercase= is_training
__lowercase= use_input_lengths
__lowercase= use_token_type_ids
__lowercase= use_labels
__lowercase= gelu_activation
__lowercase= sinusoidal_embeddings
__lowercase= causal
__lowercase= asm
__lowercase= n_langs
__lowercase= vocab_size
__lowercase= n_special
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_sequence_label_size
__lowercase= initializer_range
__lowercase= num_labels
__lowercase= num_choices
__lowercase= summary_type
__lowercase= use_proj
__lowercase= scope
__lowercase= bos_token_id
def _A (self ):
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase= random_attention_mask([self.batch_size, self.seq_length] )
__lowercase= None
if self.use_input_lengths:
__lowercase= (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__lowercase= None
if self.use_token_type_ids:
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__lowercase= None
__lowercase= None
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase= ids_tensor([self.batch_size] , 2 ).float()
__lowercase= ids_tensor([self.batch_size] , self.num_choices )
__lowercase= self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _A (self ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , lengths=lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMWithLMHeadModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnsweringSimple(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
__lowercase= outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnswering(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , p_mask=lowerCAmelCase , )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , )
((__lowercase), )= result_with_labels.to_tuple()
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
((__lowercase), )= result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_labels
__lowercase= XLMForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_choices
__lowercase= XLMForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= model(
lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _A (self ):
__lowercase= self.prepare_config_and_inputs()
(
(
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
), (
__lowercase
),
)= config_and_inputs
__lowercase= {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class A ( A_ , A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : int =(
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Dict =(
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCamelCase_ : str =(
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ):
__lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__lowercase= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )
__lowercase= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )
return inputs_dict
def _A (self ):
__lowercase= XLMModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , emb_dim=3_7 )
def _A (self ):
self.config_tester.run_common_tests()
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase ) )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= min_length + idx + 1
__lowercase= (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase ) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase ) , )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase ) , )
pass
@slow
def _A (self ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= XLMModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@require_torch
class A ( unittest.TestCase ):
@slow
def _A (self ):
__lowercase= XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(lowerCAmelCase )
__lowercase= torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=lowerCAmelCase ) # the president
__lowercase= [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase )
| 304
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 304
| 1
|
"""simple docstring"""
def factorial(num):
    """Return num! computed iteratively."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact
def split_and_add(number):
    """Return the sum of the decimal digits of `number`."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits
def solution(num=100):
    """Return the digit sum of num! (Project Euler problem 20 for num=100)."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
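# Sanity check: 10! = 3628800, whose digits sum to 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27,
# so solution(10) == 27.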
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 61
|
'''simple docstring'''
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update
    def left(self, idx: int) -> int:
        """Index of the left child of node `idx`."""
        return idx * 2
    def right(self, idx: int) -> int:
        """Index of the right child of node `idx`."""
        return idx * 2 + 1
    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        """Build the max segment tree over a[left_element - 1 : right_element]."""
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign `val` on the range [a, b], deferring child updates lazily."""
        if self.flag[idx] is True:
            # push a pending assignment down before visiting this node
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True
    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum over [a, b], pushing pending lazy assignments down."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)
    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
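# In the demo below, the first three queries return the maxima 7 (over indices
# 4..6), 14 (7..11), and 15 (7..12); after assigning 111 to indices 1..3, the
# query over the full range 1..15 returns 111.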
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 1_1))
print(segt.query(1, 1, size, 7, 1_2))
segt.update(1, 1, size, 1, 3, 1_1_1)
print(segt.query(1, 1, size, 1, 1_5))
segt.update(1, 1, size, 7, 8, 2_3_5)
print(segt)
| 321
| 0
|
def factorial(digit: int) -> int:
    """Return digit! computed recursively."""
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))
def krishnamurthy(number: int) -> bool:
    """Check whether `number` equals the sum of the factorials of its digits."""
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number
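# Example: 145 is a Krishnamurthy number because 1! + 4! + 5! = 1 + 24 + 120 = 145.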
if __name__ == "__main__":
print("Program to check whether a number is a Krisnamurthy Number or not.")
_lowercase: Union[str, Any] = int(input("Enter number: ").strip())
print(
F"""{number} is {"" if krishnamurthy(number) else "not "}a Krishnamurthy Number."""
)
| 71
|
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase: str = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class _lowercase ( lowerCAmelCase, unittest.TestCase ):
"""simple docstring"""
__A = XLMProphetNetTokenizer
__A = False
__A = True
def UpperCamelCase_ (self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
a = XLMProphetNetTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = "[PAD]"
a = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "[PAD]" )
self.assertEqual(vocab_keys[1] , "[CLS]" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(lowerCamelCase_ ) , 1012 )
def UpperCamelCase_ (self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = XLMProphetNetTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
a = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCamelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
a = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
a = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
a = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
] , )
@cached_property
def UpperCamelCase_ (self ):
"""simple docstring"""
return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" )
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
a = "Hello World!"
a = [35389, 6672, 49, 2]
self.assertListEqual(lowerCamelCase_ , self.big_tokenizer.encode(lowerCamelCase_ ) )
@slow
def UpperCamelCase_ (self ):
"""simple docstring"""
a = {"input_ids": [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
| 71
| 1
|
"""simple docstring"""
def average_absolute_deviation(nums: list[int]) -> float:
    '''simple docstring'''
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
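# Example: average_absolute_deviation([0, 0, 4, 4]) -> the mean is 2.0 and every
# element deviates from it by 2.0, so the result is 2.0.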
if __name__ == "__main__":
import doctest
doctest.testmod()
| 108
|
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
lowerCAmelCase: List[str] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class a__( lowerCamelCase__ ):
def __init__( self : Any , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : List[Any]=None , __snake_case : Optional[Any]=1 ):
a : Union[str, Any] = tokenizer
a : Union[str, Any] = dataset
a : Any = len(__snake_case ) if n_tasks is None else n_tasks
a : List[str] = n_copies
def __iter__( self : str ):
a : List[Any] = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
a : Dict = self.tokenizer(__snake_case , padding=__snake_case , return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class a__( lowerCamelCase__ ):
def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : Optional[Any] , __snake_case : str ):
a : Dict = start_length
a : Dict = eof_strings
a : str = tokenizer
def __call__( self : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Any , **__snake_case : Union[str, Any] ):
a : int = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
a : Optional[int] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(__snake_case )
def lowerCamelCase__ ( _A ):
a : Optional[Any] = re.split('(%s)' % '|'.join(_A ) , _A )
# last string should be ""
return "".join(string_list[:-2] )
def lowerCamelCase__ ( _A , _A , _A , _A , _A , _A=20 , **_A ):
a : Optional[Any] = defaultdict(_A ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_A ) ):
with torch.no_grad():
a : Optional[Any] = batch['ids'].shape[-1]
a : Optional[Any] = accelerator.unwrap_model(_A ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_A , **_A )
# each task is generated batch_size times
a : Tuple = batch['task_id'].repeat(_A )
a : List[Any] = accelerator.pad_across_processes(
_A , dim=1 , pad_index=tokenizer.pad_token_id )
a , a : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
a : List[str] = generated_tokens.cpu().numpy()
a : int = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_A , _A ):
gen_token_dict[task].append(_A )
a : Any = [[] for _ in range(_A )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
a : Optional[int] = tokenizer.decode(_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A )
code_gens[task].append(remove_last_block(_A ) )
return code_gens
def lowerCamelCase__ ( ):
# Setup configuration
a : Dict = HfArgumentParser(_A )
a : Any = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
a : List[Any] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
a : int = 'false'
if args.num_workers is None:
a : Dict = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
a : List[Any] = Accelerator()
set_seed(args.seed , device_specific=_A )
# Load model and tokenizer
a : Dict = AutoTokenizer.from_pretrained(args.model_ckpt )
a : str = tokenizer.eos_token
a : List[Any] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
a : Optional[Any] = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _A , _A )] ),
}
# Load evaluation dataset and metric
a : Optional[int] = load_dataset('openai_humaneval' )
a : Optional[Any] = load_metric('code_eval' )
a : Optional[int] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
a : Optional[Any] = args.n_samples // args.batch_size
a : Any = TokenizedDataset(_A , human_eval['test'] , n_copies=_A , n_tasks=_A )
# do not confuse args.batch_size, which is actually the num_return_sequences
a : int = DataLoader(_A , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
a : int = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
a , a : int = accelerator.prepare(_A , _A )
a : int = complete_code(
_A , _A , _A , _A , n_tasks=_A , batch_size=args.batch_size , **_A , )
if accelerator.is_main_process:
a : List[str] = []
for task in tqdm(range(_A ) ):
a : int = human_eval['test'][task]['test']
a : int = f"""check({human_eval["test"][task]["entry_point"]})"""
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
a , a : Tuple = code_eval_metric.compute(
references=_A , predictions=_A , num_workers=args.num_workers )
print(f"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_A , _A )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
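# Typical invocation (a sketch; the script file name is assumed, but the flags
# mirror attributes read from HumanEvalArguments above — model_ckpt, num_tasks,
# n_samples, batch_size, output_file — plus HF_ALLOW_CODE_EVAL, which the error
# message above requires to be "1" before generated code is executed):
#
#   accelerate launch human_eval.py --model_ckpt <checkpoint> --num_tasks 16 \
#       --n_samples 20 --batch_size 5 --output_file eval_results.json --HF_ALLOW_CODE_EVAL 1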
| 297
| 0
|
import random
def _partition(data: list, pivot) -> tuple:
    """Partition `data` into elements less than, equal to, and greater than `pivot`."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def quick_select(items: list, index: int):
    """Return the element that would sit at position `index` if `items` were sorted."""
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)
    # invalid input
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
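# Example: sorted([2, 4, 5, 7, 899, 54, 32]) is [2, 4, 5, 7, 32, 54, 899], so
# quick_select([2, 4, 5, 7, 899, 54, 32], 3) returns the median 7 in expected
# linear time, without fully sorting the list.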
| 361
|
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Return all k-element combinations of {1, ..., n} in lexicographic order."""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result
def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()
def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
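# Example: generate_all_combinations(4, 2) yields
# [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]].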
if __name__ == "__main__":
lowerCamelCase__ = 4
lowerCamelCase__ = 2
lowerCamelCase__ = generate_all_combinations(n, k)
print_all_state(total_list)
| 307
| 0
|
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}
class Direction(enum.Enum):
    UP = 0
    DOWN = 1
def forceWrite(content, end="") -> None:
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()
def writeColor(content, color, end="") -> None:
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)
def reset_cursor() -> None:
    forceWrite("\r")
def move_cursor(num_lines: int, direction: str) -> None:
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")
def clear_line() -> None:
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()
def linebreak() -> None:
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
| 6
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
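# Submodule -> public-symbol map consumed by _LazyModule below; the optional backends
# (tokenizers, torch, tf, flax) extend it only when their dependencies are importable.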
_import_structure = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_whisper_fast'] = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_whisper'] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_whisper'] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_flax_whisper'] = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 6
| 1
|
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
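# The test below trains the same model twice (once per noise scheduler) and expects
# element-wise-close results, so deterministic matmuls matter here.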
torch.backends.cuda.matmul.allow_tf32 = False
class TrainingTests ( unittest.TestCase ):
'''simple docstring'''
def get_model_optimizer(self , resolution=32 ) -> tuple:
"""simple docstring"""
set_seed(0 )
model = UNet2DModel(sample_size=resolution , in_channels=3 , out_channels=3 )
optimizer = torch.optim.SGD(model.parameters() , lr=0.0001 )
return model, optimizer
@slow
def test_training_step_equality(self ) -> None:
"""simple docstring"""
device = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
ddpm_scheduler = DDPMScheduler(
num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=True , )
ddim_scheduler = DDIMScheduler(
num_train_timesteps=1000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=True , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
clean_images = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(device ) for _ in range(4 )]
noise = [torch.randn((4, 3, 32, 32) ).to(device ) for _ in range(4 )]
timesteps = [torch.randint(0 , 1000 , (4,) ).long().to(device ) for _ in range(4 )]
# train with a DDPM scheduler
model, optimizer = self.get_model_optimizer(resolution=32 )
model.train().to(device )
for i in range(4 ):
optimizer.zero_grad()
ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
ddpm_noise_pred = model(ddpm_noisy_images , timesteps[i] ).sample
loss = torch.nn.functional.mse_loss(ddpm_noise_pred , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
model, optimizer = self.get_model_optimizer(resolution=32 )
model.train().to(device )
for i in range(4 ):
optimizer.zero_grad()
ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
ddim_noise_pred = model(ddim_noisy_images , timesteps[i] ).sample
loss = torch.nn.functional.mse_loss(ddim_noise_pred , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(ddpm_noisy_images , ddim_noisy_images , atol=1E-5 ) )
self.assertTrue(torch.allclose(ddpm_noise_pred , ddim_noise_pred , atol=1E-5 ) )
| 366
|
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)
class MobileViTFeatureExtractor ( MobileViTImageProcessor ):
'''simple docstring'''
def __init__(self , *args , **kwargs ) -> None:
"""simple docstring"""
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , FutureWarning , )
super().__init__(*args , **kwargs )
| 335
| 0
|
'''simple docstring'''
from math import factorial
class Dual:
def __init__( self : Optional[int] , real , rank ) -> None:
self.real = real
if isinstance(rank , int ):
self.duals = [1] * rank
else:
self.duals = rank
def __repr__( self : Dict ) -> str:
return (
F"{self.real}+"
F"{'+'.join(str(dual )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"
)
def reduce( self : int ) -> "Dual":
cur = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , cur )
def __add__( self : Union[str, Any] , other ) -> "Dual":
if not isinstance(other , Dual ):
return Dual(self.real + other , self.duals )
s_dual = self.duals.copy()
o_dual = other.duals.copy()
if len(s_dual ) > len(o_dual ):
o_dual.extend([1] * (len(s_dual ) - len(o_dual )) )
elif len(s_dual ) < len(o_dual ):
s_dual.extend([1] * (len(o_dual ) - len(s_dual )) )
new_duals = []
for i in range(len(s_dual ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , new_duals )
__radd__ = __add__
def __sub__( self : int , other ) -> "Dual":
return self + other * -1
def __mul__( self : Optional[int] , other ) -> "Dual":
if not isinstance(other , Dual ):
new_duals = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , new_duals )
new_duals = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , new_duals )
__rmul__ = __mul__
def __truediv__( self : Union[str, Any] , other ) -> "Dual":
if not isinstance(other , Dual ):
new_duals = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , new_duals )
raise ValueError
def __floordiv__( self : Dict , other ) -> "Dual":
if not isinstance(other , Dual ):
new_duals = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , new_duals )
raise ValueError
def __pow__( self : Tuple , n ) -> "Dual":
if n < 0 or isinstance(n , float ):
raise ValueError('''power must be a positive integer''' )
if n == 0:
return 1
if n == 1:
return self
x = self
for _ in range(n - 1 ):
x *= self
return x
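# differentiate(func, position, order): evaluating func on the dual number (position + E1)
# propagates derivatives automatically; the coefficient of E_order times order! recovers
# the order-th derivative of func at that position.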
def differentiate(func , position , order ) -> float:
if not callable(func ):
raise ValueError('''differentiate() requires a function as input for func''' )
if not isinstance(position , (float, int) ):
raise ValueError('''differentiate() requires a float as input for position''' )
if not isinstance(order , int ):
raise ValueError('''differentiate() requires an int as input for order''' )
d = Dual(position , 1 )
result = func(d )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(order )
if __name__ == "__main__":
import doctest
doctest.testmod()
def f(y ):
return y**2 * y**4
print(differentiate(f, 9, 2))
| 304
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
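# A batched, differentiable pinhole camera: origin/x/y/z give each camera's position and
# orthonormal frame as [batch_size x 3] tensors; rays are built analytically from the FOV.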
@dataclass
class DifferentiableProjectiveCamera:
origin: torch.Tensor # [batch_size x 3]
x: torch.Tensor # [batch_size x 3]
y: torch.Tensor # [batch_size x 3]
z: torch.Tensor # [batch_size x 3]
width: int
height: int
x_fov: float
y_fov: float
shape: Tuple[int]
def __post_init__( self : Tuple ) -> None:
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def resolution( self : List[Any] ) -> torch.Tensor:
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.float32 ) )
def fov( self : Any ) -> torch.Tensor:
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.float32 ) )
def get_image_coords( self : Optional[int] ) -> torch.Tensor:
pixel_indices = torch.arange(self.height * self.width )
coords = torch.stack(
[
pixel_indices % self.width,
torch.div(pixel_indices , self.width , rounding_mode='''trunc''' ),
] , axis=1 , )
return coords
@property
def camera_rays( self : Optional[Any] ) -> torch.Tensor:
batch_size, *inner_shape = self.shape
inner_batch_size = int(np.prod(inner_shape ) )
coords = self.get_image_coords()
coords = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
rays = self.get_camera_rays(coords )
rays = rays.view(batch_size , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def get_camera_rays( self : Optional[int] , coords : torch.Tensor ) -> torch.Tensor:
batch_size, *shape, n_coords = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
flat = coords.view(batch_size , -1 , 2 )
res = self.resolution()
fov = self.fov()
fracs = (flat.float() / (res - 1)) * 2 - 1
fracs = fracs * torch.tan(fov / 2 )
fracs = fracs.view(batch_size , -1 , 2 )
directions = (
self.z.view(batch_size , 1 , 3 )
+ self.x.view(batch_size , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(batch_size , 1 , 3 ) * fracs[:, :, 1:]
)
directions = directions / directions.norm(dim=-1 , keepdim=True )
rays = torch.stack(
[
torch.broadcast_to(self.origin.view(batch_size , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(batch_size , *shape , 2 , 3 )
def A ( self : Tuple , _A : int , _A : int ) -> "DifferentiableProjectiveCamera":
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=_A , height=_A , x_fov=self.x_fov , y_fov=self.y_fov , )
def create_pan_cameras(size : int ) -> DifferentiableProjectiveCamera:
origins = []
xs = []
ys = []
zs = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
z = np.array([np.sin(theta ), np.cos(theta ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
origin = -z * 4
x = np.array([np.cos(theta ), -np.sin(theta ), 0.0] )
y = np.cross(z , x )
origins.append(origin )
xs.append(x )
ys.append(y )
zs.append(z )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(origins , axis=0 ) ).float() , x=torch.from_numpy(np.stack(xs , axis=0 ) ).float() , y=torch.from_numpy(np.stack(ys , axis=0 ) ).float() , z=torch.from_numpy(np.stack(zs , axis=0 ) ).float() , width=size , height=size , x_fov=0.7 , y_fov=0.7 , shape=(1, len(xs )) , )
| 304
| 1
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class CopyCheckTester ( unittest.TestCase ):
def setUp(self ) -> None:
self.transformer_dir = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir, """models/bert/""" ) )
check_copies.TRANSFORMER_PATH = self.transformer_dir
shutil.copy(
os.path.join(git_repo_path, """src/transformers/models/bert/modeling_bert.py""" ), os.path.join(self.transformer_dir, """models/bert/modeling_bert.py""" ), )
def tearDown(self ) -> None:
check_copies.TRANSFORMER_PATH = """src/transformers"""
shutil.rmtree(self.transformer_dir )
def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None ) -> None:
code = comment + f'\nclass {class_name}(nn.Module):\n' + class_code
if overwrite_result is not None:
expected = comment + f'\nclass {class_name}(nn.Module):\n' + overwrite_result
black_mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119 )
code = black.format_str(code, mode=black_mode )
fname = os.path.join(self.transformer_dir, """new_code.py""" )
with open(fname, """w""", newline="""\n""" ) as f:
f.write(code )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name, overwrite=True )
with open(fname, """r""" ) as f:
self.assertTrue(f.read(), expected )
def test_find_code_in_transformers(self ) -> None:
code = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
self.assertEqual(code, REFERENCE_CODE )
def test_is_copy_consistent(self ) -> None:
# Base copy consistency
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""", """BertLMPredictionHead""", REFERENCE_CODE + """\n""", )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""", """BertLMPredictionHead""", SCREAMING_SNAKE_CASE_, )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""", """TestModelLMPredictionHead""", re.sub("""Bert""", """TestModel""", SCREAMING_SNAKE_CASE_ ), )
# Copy consistency with a really long name
UpperCAmelCase_: Any = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
f'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}', f'{long_class_name}LMPredictionHead', re.sub("""Bert""", SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ), )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""", """TestModelLMPredictionHead""", SCREAMING_SNAKE_CASE_, overwrite_result=re.sub("""Bert""", """TestModel""", SCREAMING_SNAKE_CASE_ ), )
def test_convert_to_localized_md(self ) -> None:
localized_readme = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
UpperCAmelCase_: Tuple = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
UpperCAmelCase_: Optional[Any] = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
UpperCAmelCase_: Tuple = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
UpperCAmelCase_: int = check_copies.convert_to_localized_md(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, localized_readme["""format_model_list"""] )
self.assertFalse(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[str] = check_copies.convert_to_localized_md(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, localized_readme["""format_model_list"""] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Union[str, Any] = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
UpperCAmelCase_: Optional[int] = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
UpperCAmelCase_: int = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
UpperCAmelCase_: List[str] = check_copies.convert_to_localized_md(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, localized_readme["""format_model_list"""] )
# Check if the model link is synchronized.
self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
| 364
|
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 82
| 0
|
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
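# Converts an original TensorFlow OpenAI GPT checkpoint into a PyTorch `pytorch_model.bin`
# plus `config.json`, using the numpy-based weight loader from transformers.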
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path ,openai_config_file ,pytorch_dump_folder_path ):
# Construct model
if openai_config_file == "":
config = OpenAIGPTConfig()
else:
config = OpenAIGPTConfig.from_json_file(openai_config_file )
model = OpenAIGPTModel(config )
# Load weights from numpy
load_tf_weights_in_openai_gpt(model ,config ,openai_checkpoint_folder_path )
# Save pytorch-model
pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
torch.save(model.state_dict() ,pytorch_weights_dump_path )
print(F'Save configuration file to {pytorch_config_dump_path}' )
with open(pytorch_config_dump_path ,'w' ,encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A_ :int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--openai_checkpoint_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the TensorFlow checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--openai_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
A_ :Optional[int] = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 71
|
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
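# Fast CPU smoke tests for the DDIM pipeline, followed by slow GPU integration tests
# that compare generated image slices against stored reference values.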
class DDIMPipelineFastTests ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
pipeline_class =DDIMPipeline
params =UNCONDITIONAL_IMAGE_GENERATION_PARAMS
required_optional_params =PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""latents""",
"""callback""",
"""callback_steps""",
}
batch_params =UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
test_cpu_offload =False
def get_dummy_components( self ):
"""simple docstring"""
torch.manual_seed(0 )
unet =UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
scheduler =DDIMScheduler()
components ={'unet': unet, 'scheduler': scheduler}
return components
def get_dummy_inputs( self , device , seed=0 ):
"""simple docstring"""
if str(device ).startswith('mps' ):
generator =torch.manual_seed(seed )
else:
generator =torch.Generator(device=device ).manual_seed(seed )
inputs ={
'batch_size': 1,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def test_inference( self ):
"""simple docstring"""
device ='cpu'
components =self.get_dummy_components()
pipe =self.pipeline_class(**components )
pipe.to(device )
pipe.set_progress_bar_config(disable=None )
inputs =self.get_dummy_inputs(device )
image =pipe(**inputs ).images
image_slice =image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
expected_slice =np.array(
[1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04] )
max_diff =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(max_diff , 1E-3 )
def __lowercase ( self ):
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __lowercase ( self ):
"""simple docstring"""
super().test_save_load_local(expected_max_difference=3E-3 )
def __lowercase ( self ):
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def __lowercase ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests ( unittest.TestCase ):
"""simple docstring"""
def test_inference_cifar10( self ):
"""simple docstring"""
model_id ='google/ddpm-cifar10-32'
unet =UNet2DModel.from_pretrained(model_id )
scheduler =DDIMScheduler()
ddim =DDIMPipeline(unet=unet , scheduler=scheduler )
ddim.to(torch_device )
ddim.set_progress_bar_config(disable=None )
generator =torch.manual_seed(0 )
image =ddim(generator=generator , eta=0.0 , output_type='numpy' ).images
image_slice =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice =np.array([0.1_723, 0.1_617, 0.1_600, 0.1_626, 0.1_497, 0.1_513, 0.1_505, 0.1_442, 0.1_453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def test_inference_ema_bedroom( self ):
"""simple docstring"""
model_id ='google/ddpm-ema-bedroom-256'
unet =UNet2DModel.from_pretrained(model_id )
scheduler =DDIMScheduler.from_pretrained(model_id )
ddpm =DDIMPipeline(unet=unet , scheduler=scheduler )
ddpm.to(torch_device )
ddpm.set_progress_bar_config(disable=None )
generator =torch.manual_seed(0 )
image =ddpm(generator=generator , output_type='numpy' ).images
image_slice =image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
expected_slice =np.array([0.0_060, 0.0_201, 0.0_344, 0.0_024, 0.0_018, 0.0_002, 0.0_022, 0.0_000, 0.0_069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 71
| 1
|
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
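# Fast CPU tests swap different schedulers into the ONNX x4 upscaler pipeline and check
# a 3x3 output slice; the nightly tests further below run the full pipeline on CUDA.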
class OnnxStableDiffusionUpscalePipelineFastTests ( OnnxPipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"
def get_dummy_inputs( self, seed=0 ):
image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed ) )
generator = torch.manual_seed(seed )
inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def test_pipeline_default_ddpm( self ) -> None:
pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=None )
inputs = self.get_dummy_inputs()
image = pipe(**inputs ).images
image_slice = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def snake_case_ ( self ) -> Any:
UpperCamelCase : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider' )
UpperCamelCase : Union[str, Any] = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCamelCase : Dict = self.get_dummy_inputs()
UpperCamelCase : Optional[int] = pipe(**__UpperCamelCase ).images
UpperCamelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase : Tuple = np.array(
[0.6_89_88_92, 0.59_24_05_56, 0.52_49_95_27, 0.58_86_62_15, 0.52_25_82_35, 0.52_57_27_15, 0.62_41_44_73, 0.6_17_43_87, 0.6_21_49_64] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def snake_case_ ( self ) -> Tuple:
UpperCamelCase : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider' )
UpperCamelCase : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCamelCase : Dict = self.get_dummy_inputs()
UpperCamelCase : List[str] = pipe(**__UpperCamelCase ).images
UpperCamelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase : Optional[int] = np.array(
[0.7_65_92_78, 0.76_43_76_64, 0.75_57_91_07, 0.7_69_11_16, 0.77_66_69_86, 0.7_72_76_72, 0.7_75_86_64, 0.7_81_22_26, 0.76_94_25_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def snake_case_ ( self ) -> List[str]:
UpperCamelCase : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider' )
UpperCamelCase : List[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCamelCase : Tuple = self.get_dummy_inputs()
UpperCamelCase : Tuple = pipe(**__UpperCamelCase ).images
UpperCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase : List[str] = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def snake_case_ ( self ) -> Union[str, Any]:
UpperCamelCase : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider' )
UpperCamelCase : Union[str, Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCamelCase : List[Any] = self.get_dummy_inputs()
UpperCamelCase : Dict = pipe(**__UpperCamelCase ).images
UpperCamelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase : Union[str, Any] = np.array(
[0.77_42_44_96, 0.77_36_01, 0.7_64_52_88, 0.7_76_95_98, 0.7_77_27_39, 0.7_73_86_88, 0.78_18_72_33, 0.77_87_95_84, 0.76_70_43] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests ( unittest.TestCase ):
"""simple docstring"""
@property
def gpu_provider( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def gpu_options( self ):
options = ort.SessionOptions()
options.enable_mem_pattern = False
return options
def snake_case_ ( self ) -> Any:
UpperCamelCase : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
UpperCamelCase : List[str] = init_image.resize((128, 128) )
# using the PNDM scheduler by default
UpperCamelCase : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx', provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCamelCase : List[str] = 'A fantasy landscape, trending on artstation'
UpperCamelCase : Optional[Any] = torch.manual_seed(0 )
UpperCamelCase : int = pipe(
prompt=__UpperCamelCase, image=__UpperCamelCase, guidance_scale=7.5, num_inference_steps=10, generator=__UpperCamelCase, output_type='np', )
UpperCamelCase : List[Any] = output.images
UpperCamelCase : str = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
UpperCamelCase : Any = np.array([0.48_83, 0.49_47, 0.49_80, 0.49_75, 0.49_82, 0.49_80, 0.50_00, 0.50_06, 0.49_72] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def snake_case_ ( self ) -> str:
UpperCamelCase : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
UpperCamelCase : Optional[int] = init_image.resize((128, 128) )
UpperCamelCase : Any = LMSDiscreteScheduler.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx', subfolder='scheduler' )
UpperCamelCase : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx', scheduler=__UpperCamelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCamelCase : int = 'A fantasy landscape, trending on artstation'
UpperCamelCase : Any = torch.manual_seed(0 )
UpperCamelCase : Optional[int] = pipe(
prompt=__UpperCamelCase, image=__UpperCamelCase, guidance_scale=7.5, num_inference_steps=20, generator=__UpperCamelCase, output_type='np', )
UpperCamelCase : Dict = output.images
UpperCamelCase : Any = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
UpperCamelCase : Dict = np.array(
[0.50_17_37_53, 0.50_22_33_56, 0.50_20_39, 0.50_23_30_36, 0.5_02_37_25, 0.5_02_26_01, 0.5_01_87_58, 0.50_23_40_85, 0.50_24_15_66] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 360
|
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester ( unittest.TestCase ):
def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ) -> None:
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_choices = num_choices
def prepare_config_and_inputs( self ) -> tuple:
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
attention_mask = None
if self.use_attention_mask:
attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
config = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
return config, input_ids, token_type_ids, attention_mask
def prepare_config_and_inputs_for_common( self ) -> tuple:
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, token_type_ids, attention_mask = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def prepare_config_and_inputs_for_decoder( self ) -> tuple:
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, token_type_ids, attention_mask = config_and_inputs
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
test_head_masking = True
all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def setUp( self ) -> None:
self.model_tester = FlaxRobertaPreLayerNormModelTester(self )
@slow
def test_model_from_pretrained( self ) -> None:
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=True )
outputs = model(np.ones((1, 1) ) )
self.assertIsNotNone(outputs )
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
@slow
def test_inference_masked_lm( self ) -> None:
model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=True )
input_ids = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]], dtype=jnp.int32 )
output = model(input_ids )[0]
expected_shape = [1, 11, 5_0265]
self.assertEqual(list(output.shape ), expected_shape )
# compare the actual values for a slice.
expected_slice = np.array(
[[[40.48_80, 18.01_99, -5.23_67], [-1.88_77, -4.08_85, 10.70_85], [-2.26_13, -5.61_10, 7.26_65]]], dtype=np.float32 )
self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4 ) )
@slow
def test_inference_no_head( self ) -> None:
model = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=True )
input_ids = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]], dtype=jnp.int32 )
output = model(input_ids )[0]
# compare the actual values for a slice.
expected_slice = np.array(
[[[0.02_08, -0.03_56, 0.02_37], [-0.15_69, -0.04_11, -0.26_26], [0.18_79, 0.01_25, -0.00_89]]], dtype=np.float32 )
self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4 ) )
| 103
| 0
|
"""simple docstring"""
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
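# Converts raw (possibly sharded) LLaMA checkpoints into the Hugging Face format:
# weights are un-sharded, query/key projections are permuted for sliced rotary
# embeddings, and an index json is written for the resulting weight files.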
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'
)
__UpperCAmelCase = None
__UpperCAmelCase = {
"""7B""": 1_10_08,
"""13B""": 1_38_24,
"""30B""": 1_79_20,
"""65B""": 2_20_16,
"""70B""": 2_86_72,
}
__UpperCAmelCase = {
"""7B""": 1,
"""7Bf""": 1,
"""13B""": 2,
"""13Bf""": 2,
"""30B""": 4,
"""65B""": 8,
"""70B""": 8,
"""70Bf""": 8,
}
def compute_intermediate_size(n , ffn_dim_multiplier=1 , multiple_of=2_5_6 ) -> int:
'''simple docstring'''
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
def read_json(path ) -> dict:
'''simple docstring'''
with open(path , """r""" ) as f:
return json.load(f )
def write_json(text , path ) -> None:
'''simple docstring'''
with open(path , """w""" ) as f:
json.dump(text , f )
def write_model(model_path , input_base_path , model_size , safe_serialization=True ) -> None:
'''simple docstring'''
os.makedirs(model_path , exist_ok=True )
tmp_model_path = os.path.join(model_path , """tmp""" )
os.makedirs(tmp_model_path , exist_ok=True )
params = read_json(os.path.join(input_base_path , """params.json""" ) )
num_shards = NUM_SHARDS[model_size]
n_layers = params["""n_layers"""]
n_heads = params["""n_heads"""]
n_heads_per_shard = n_heads // num_shards
dim = params["""dim"""]
dims_per_head = dim // n_heads
base = 10000.0
inv_freq = 1.0 / (base ** (torch.arange(0 , dims_per_head , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
lowerCAmelCase_ :str = params["""n_kv_heads"""] # for GQA / MQA
lowerCAmelCase_ :Tuple = n_heads_per_shard // num_key_value_heads
lowerCAmelCase_ :str = dim // num_key_value_heads
else: # compatibility with other checkpoints
lowerCAmelCase_ :List[str] = n_heads
lowerCAmelCase_ :Any = n_heads_per_shard
lowerCAmelCase_ :Optional[int] = dim
# permute for sliced rotary
def permute(w , n_heads=n_heads , dim1=dim , dim2=dim ):
return w.view(n_heads , dim1 // n_heads // 2 , 2 , dim2 ).transpose(1 , 2 ).reshape(dim1 , dim2 )
print(f"""Fetching all parameters from the checkpoint at {input_base_path}.""" )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
lowerCAmelCase_ :Union[str, Any] = torch.load(os.path.join(_A , """consolidated.00.pth""" ) , map_location="""cpu""" )
else:
# Sharded
lowerCAmelCase_ :Any = [
torch.load(os.path.join(_A , f"""consolidated.{i:02d}.pth""" ) , map_location="""cpu""" )
for i in range(_A )
]
param_count = 0
index_dict = {"""weight_map""": {}}
for layer_i in range(n_layers ):
lowerCAmelCase_ :Tuple = f"""pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
lowerCAmelCase_ :Optional[Any] = {
f"""model.layers.{layer_i}.self_attn.q_proj.weight""": permute(
loaded[f"""layers.{layer_i}.attention.wq.weight"""] ),
f"""model.layers.{layer_i}.self_attn.k_proj.weight""": permute(
loaded[f"""layers.{layer_i}.attention.wk.weight"""] ),
f"""model.layers.{layer_i}.self_attn.v_proj.weight""": loaded[f"""layers.{layer_i}.attention.wv.weight"""],
f"""model.layers.{layer_i}.self_attn.o_proj.weight""": loaded[f"""layers.{layer_i}.attention.wo.weight"""],
f"""model.layers.{layer_i}.mlp.gate_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w1.weight"""],
f"""model.layers.{layer_i}.mlp.down_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w2.weight"""],
f"""model.layers.{layer_i}.mlp.up_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w3.weight"""],
f"""model.layers.{layer_i}.input_layernorm.weight""": loaded[f"""layers.{layer_i}.attention_norm.weight"""],
f"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[f"""layers.{layer_i}.ffn_norm.weight"""],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
lowerCAmelCase_ :Union[str, Any] = {
f"""model.layers.{layer_i}.input_layernorm.weight""": loaded[0][
f"""layers.{layer_i}.attention_norm.weight"""
].clone(),
f"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[0][
f"""layers.{layer_i}.ffn_norm.weight"""
].clone(),
}
lowerCAmelCase_ :str = permute(
torch.cat(
[
loaded[i][f"""layers.{layer_i}.attention.wq.weight"""].view(_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A ) )
lowerCAmelCase_ :List[str] = permute(
torch.cat(
[
loaded[i][f"""layers.{layer_i}.attention.wk.weight"""].view(
_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A ) , _A , _A , _A , )
lowerCAmelCase_ :str = torch.cat(
[
loaded[i][f"""layers.{layer_i}.attention.wv.weight"""].view(
_A , _A , _A )
for i in range(_A )
] , dim=0 , ).reshape(_A , _A )
lowerCAmelCase_ :Dict = torch.cat(
[loaded[i][f"""layers.{layer_i}.attention.wo.weight"""] for i in range(_A )] , dim=1 )
lowerCAmelCase_ :List[Any] = torch.cat(
[loaded[i][f"""layers.{layer_i}.feed_forward.w1.weight"""] for i in range(_A )] , dim=0 )
lowerCAmelCase_ :Union[str, Any] = torch.cat(
[loaded[i][f"""layers.{layer_i}.feed_forward.w2.weight"""] for i in range(_A )] , dim=1 )
lowerCAmelCase_ :str = torch.cat(
[loaded[i][f"""layers.{layer_i}.feed_forward.w3.weight"""] for i in range(_A )] , dim=0 )
lowerCAmelCase_ :Any = inv_freq
for k, v in state_dict.items():
lowerCAmelCase_ :Any = filename
param_count += v.numel()
torch.save(_A , os.path.join(_A , _A ) )
lowerCAmelCase_ :int = f"""pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
lowerCAmelCase_ :List[str] = {
"""model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
"""model.norm.weight""": loaded["""norm.weight"""],
"""lm_head.weight""": loaded["""output.weight"""],
}
else:
lowerCAmelCase_ :List[Any] = {
"""model.norm.weight""": loaded[0]["""norm.weight"""],
"""model.embed_tokens.weight""": torch.cat(
[loaded[i]["""tok_embeddings.weight"""] for i in range(_A )] , dim=1 ),
"""lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(_A )] , dim=0 ),
}
for k, v in state_dict.items():
lowerCAmelCase_ :Any = filename
param_count += v.numel()
torch.save(_A , os.path.join(_A , _A ) )
# Write configs
index_dict["""metadata"""] = {"""total_size""": param_count * 2}
write_json(index_dict , os.path.join(tmp_model_path , """pytorch_model.bin.index.json""" ) )
ffn_dim_multiplier = params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
multiple_of = params["""multiple_of"""] if """multiple_of""" in params else 2_5_6
config = LlamaConfig(
hidden_size=dim , intermediate_size=compute_intermediate_size(dim , ffn_dim_multiplier , multiple_of ) , num_attention_heads=params["""n_heads"""] , num_hidden_layers=params["""n_layers"""] , rms_norm_eps=params["""norm_eps"""] , num_key_value_heads=num_key_value_heads , )
config.save_pretrained(tmp_model_path )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("""Loading the checkpoint in a Llama model.""" )
model = LlamaForCausalLM.from_pretrained(tmp_model_path , torch_dtype=torch.float16 , low_cpu_mem_usage=True )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("""Saving in the Transformers format.""" )
model.save_pretrained(_A , safe_serialization=_A )
shutil.rmtree(_A )
def write_tokenizer(tokenizer_path , input_tokenizer_path ) -> None:
'''simple docstring'''
tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f"""Saving a {tokenizer_class.__name__} to {tokenizer_path}.""" )
tokenizer = tokenizer_class(input_tokenizer_path )
tokenizer.save_pretrained(tokenizer_path )
def main() -> None:
'''simple docstring'''
parser = argparse.ArgumentParser()
parser.add_argument(
"""--input_dir""" , help="""Location of LLaMA weights, which contains tokenizer.model and model folders""" , )
parser.add_argument(
"""--model_size""" , choices=["""7B""", """7Bf""", """13B""", """13Bf""", """30B""", """65B""", """70B""", """70Bf""", """tokenizer_only"""] , )
parser.add_argument(
"""--output_dir""" , help="""Location to write HF model and tokenizer""" , )
parser.add_argument("""--safe_serialization""" , type=_A , help="""Whether or not to save using `safetensors`.""" )
args = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
spm_path = os.path.join(args.input_dir , """tokenizer.model""" )
write_tokenizer(args.output_dir , spm_path )
if __name__ == "__main__":
main()
| 84
|
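# Two bounded priority queues backed by plain lists: FixedPriorityQueue keeps one FIFO
# list per priority level (0 is highest), while ElementPriorityQueue treats the smallest
# element itself as the highest priority.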
class OverFlowError( Exception ):
pass
class UnderFlowError( Exception ):
pass
class FixedPriorityQueue:
def __init__( self: List[str] ) -> None:
self.queues = [
[],
[],
[],
]
def enqueue( self: Union[str, Any] , priority: int , data: int ) -> None:
try:
if len(self.queues[priority] ) >= 1_00:
raise OverFlowError('Maximum queue size is 100' )
self.queues[priority].append(data )
except IndexError:
raise ValueError('Valid priorities are 0, 1, and 2' )
def dequeue( self: List[Any] ) -> int:
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError('All queues are empty' )
def __str__( self: Union[str, Any] ) -> str:
return "\n".join(F'''Priority {i}: {q}''' for i, q in enumerate(self.queues ) )
class ElementPriorityQueue:
def __init__( self: Union[str, Any] ) -> None:
self.queue = []
def enqueue( self: str , data: int ) -> None:
if len(self.queue ) == 1_00:
raise OverFlowError('Maximum queue size is 100' )
self.queue.append(data )
def dequeue( self: int ) -> int:
if not self.queue:
raise UnderFlowError('The queue is empty' )
else:
data = min(self.queue )
self.queue.remove(data )
return data
def __str__( self: Optional[Any] ) -> str:
return str(self.queue )
def fixed_priority_queue() -> None:
"""simple docstring"""
fpq = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 100 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 128 )
print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue() -> None:
"""simple docstring"""
epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 307
| 0
|
import warnings
from functools import wraps
from typing import Callable
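# Decorator that tags a public function as experimental: the wrapped callable emits a
# warning on every invocation but otherwise forwards all arguments unchanged.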
def experimental(fn: Callable) -> Callable:
    """Decorator that warns callers the wrapped function is experimental."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),
            FutureWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
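# A minimal usage sketch of the decorator above (the decorated function name is
# hypothetical, not from the original file):
#
#     @experimental
#     def fancy_new_api():
#         return 42
#
#     fancy_new_api()  # warns: "'fancy_new_api' is experimental and might be subject to ..."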
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Apply `function` over `iterable` in parallel, dispatching on the configured backend."""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))
    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}")
    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}")
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")
    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
    # and it requires monkey-patching joblib internal classes which is subject to change
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable)


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    """Context manager selecting the joblib backend used by parallel_map."""
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()
    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
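# A usage sketch of the context manager above, assuming joblibspark is installed
# (backend name and dispatch behaviour follow the code; the workload is illustrative):
#
#     with parallel_backend("spark"):
#         # inside this block ParallelBackendConfig.backend_name == "spark",
#         # so parallel_map dispatches to _map_with_joblib instead of a local Pool
#         ...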
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape with values in [0, vocab_size)."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs['input_ids'].shape[-1] // 2
        input_ids = inputs['input_ids'][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())

    def test_greedy_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-bert')
        model = FlaxAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-bert-flax-only')

        encoder_input_str = 'Hello world'
        input_ids = tokenizer(encoder_input_str, return_tensors='np').input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, 'do_samples'):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, 'foo'):
            fake_model_kwargs = {'foo': 'bar'}
            model.generate(input_ids, **fake_model_kwargs)
"""simple docstring"""
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    if initial_intensity < 0:
        raise ValueError("""The value of intensity cannot be negative""")
        # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""")
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
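# Worked example: a polarizer at 60 degrees transmits I = I0 * cos^2(60 deg) = 0.25 * I0,
# so malus_law(100.0, 60.0) returns 25.0 (up to floating-point rounding).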
"""simple docstring"""
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative (given the sigmoid output) when `deriv` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single sigmoid unit so that its scaled output approaches `expected`."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100
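# One illustrative iteration (numbers are examples, not from the original file):
# with weight = 1.0 and expected = 50, layer_1 = sigmoid(0.02 * 1.0) ~ 0.505,
# layer_1_error = 0.5 - 0.505 ~ -0.005, and the weight is nudged by
# INITIAL_VALUE * layer_1_error * layer_1 * (1 - layer_1) -- plain gradient descent
# on a single sigmoid unit.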
if __name__ == "__main__":
import doctest
doctest.testmod()
    expected = int(input('''Expected value: '''))
    number_propagations = int(input('''Number of propagations: '''))
print(forward_propagation(expected, number_propagations))
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_lowercase: Any = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"])
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?" , candidate_labels="politics" )
        self.assertEqual(outputs , {"sequence": ANY(str ), "labels": [ANY(str )], "scores": [ANY(float )]} )
        # No kwarg
        outputs = classifier("Who are you voting for in 2020?" , ["politics"] )
        self.assertEqual(outputs , {"sequence": ANY(str ), "labels": [ANY(str )], "scores": [ANY(float )]} )
        outputs = classifier("Who are you voting for in 2020?" , candidate_labels=["politics"] )
        self.assertEqual(outputs , {"sequence": ANY(str ), "labels": [ANY(str )], "scores": [ANY(float )]} )
        outputs = classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health" )
        self.assertEqual(
            outputs , {"sequence": ANY(str ), "labels": [ANY(str ), ANY(str )], "scores": [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
        outputs = classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"] )
        self.assertEqual(
            outputs , {"sequence": ANY(str ), "labels": [ANY(str ), ANY(str )], "scores": [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
        outputs = classifier(
            "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="This text is about {}" )
        self.assertEqual(outputs , {"sequence": ANY(str ), "labels": [ANY(str )], "scores": [ANY(float )]} )
        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"] , ["positive", "negative"] )
        self.assertEqual(
            outputs , [
                {"sequence": ANY(str ), "labels": [ANY(str ), ANY(str )], "scores": [ANY(float ), ANY(float )]}
                for i in range(1 )
            ] , )
        outputs = classifier(["I am happy", "I am sad"] , ["positive", "negative"] )
        self.assertEqual(
            outputs , [
                {"sequence": ANY(str ), "labels": [ANY(str ), ANY(str )], "scores": [ANY(float ), ANY(float )]}
                for i in range(2 )
            ] , )
        with self.assertRaises(ValueError ):
            classifier("" , candidate_labels="politics" )
        with self.assertRaises(TypeError ):
            classifier(None , candidate_labels="politics" )
        with self.assertRaises(ValueError ):
            classifier("Who are you voting for in 2020?" , candidate_labels="" )
        with self.assertRaises(TypeError ):
            classifier("Who are you voting for in 2020?" , candidate_labels=None )
        with self.assertRaises(ValueError ):
            classifier(
                "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , )
        with self.assertRaises(AttributeError ):
            classifier(
                "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=None , )
        self.run_entailment_id(classifier )

    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id
        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1 )
        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2 )
        config.label2id = original_label2id
        self.assertEqual(original_entailment , zero_shot_classifier.entailment_id )
    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100 , candidate_labels=["politics", "public health", "science"] )

    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(outputs ) , {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            } , )

    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(outputs ) , {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            } , )
    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(outputs ) , {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            } , )
        outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=_snake_case , )
self.assertEqual(
nested_simplify(_snake_case ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
} , )
    @slow
    @require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf" )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(outputs ) , {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            } , )
        outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=_snake_case , )
self.assertEqual(
nested_simplify(_snake_case ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
} , )
def factorial(num):
    """Compute num! iteratively."""
    fact = 1
    for i in range(1 , num + 1 ):
        fact *= i
    return fact


def split_and_add(number):
    """Sum the decimal digits of `number`."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num=100):
    """Return the sum of the digits of num!."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
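# Worked example: solution(10) -> factorial(10) = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
# For the default num=100 the digit sum is 648.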
"""simple docstring"""
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
_a = int(input('Enter number: ').strip())
print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint , **kwargs )

    def tearDown(self):
        shutil.rmtree(self.tmpdirname )

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        processor.save_pretrained(self.tmpdirname )
        processor = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len ),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string , voice_preset=voice_preset )
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname , "file.npz" )
        np.savez(tmpfilename , **voice_preset )
        inputs = processor(text=self.input_string , voice_preset=tmpfilename )
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string , voice_preset=self.voice_preset )

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        encoded_processor = processor(text=self.input_string )
        encoded_tok = tokenizer(
            self.input_string , padding="max_length" , max_length=256 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
snake_case_ : Tuple = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n >>> pipe_prior.to(\"cuda\")\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n >>> pipe.to(\"cuda\")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save(\"cat.png\")\n ```\n"
def downscale_height_and_width(height: int, width: int, scale_factor: int = 8):
    """Round (height, width) up to the nearest multiple of scale_factor**2, then divide by scale_factor."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
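# Worked example: with the default scale_factor=8, a 768x768 request maps to a
# 96x96 latent size (768 // 64 = 12, then 12 * 8 = 96); 300 maps to 40 because
# 300 // 64 = 4 with a remainder, so the helper rounds up to 5 before re-scaling.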
class KandinskyV22Pipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel, ):
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''')
        device = torch.device(F"""cuda:{gpu_id}""")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0'''):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''')
        device = torch.device(F"""cuda:{gpu_id}""")
        if self.device.type != "cpu":
            self.to('''cpu''' , silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet , '''_hf_hook'''):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , '''_hf_hook''')
                and hasattr(module._hf_hook , '''execution_device''')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]] , negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]] , height: int = 512 , width: int = 512 , num_inference_steps: int = 100 , guidance_scale: float = 4.0 , num_images_per_prompt: int = 1 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds , list):
            image_embeds = torch.cat(image_embeds , dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds , list):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0).to(dtype=self.unet.dtype , device=device)
        self.scheduler.set_timesteps(num_inference_steps , device=device)
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = downscale_height_and_width(height , width , self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'''image_embeds''': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1] , dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1)
            if not (
                hasattr(self.scheduler.config , '''variance_type''')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1] , dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True)['''sample''']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1)
            image = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
'''n_samples''': 64,
'''horizon''': 32,
'''num_inference_steps''': 20,
'''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network
'''scale_grad_by_std''': True,
'''scale''': 0.1,
'''eta''': 0.0,
'''t_grad_cutoff''': 2,
'''device''': '''cpu''',
}
if __name__ == "__main__":
    env_name = '''hopper-medium-v2'''
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        '''bglick13/hopper-medium-v2-value-function-hor32''',
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                F'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
                F''' {total_score}'''
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(F'''Total reward: {total_reward}''')
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    model_type = '''wav2vec2'''

    def __init__(
        self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5,
        feat_extract_norm="group", feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True,
        mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0,
        mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2,
        contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256,
        diversity_loss_weight=0.1, ctc_loss_reduction="sum", ctc_zero_infinity=False, use_weighted_layer_sum=False,
        classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2,
        add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None,
        adapter_attn_dim=None, **kwargs,
    ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
                f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul , self.conv_stride , 1 )
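# A minimal usage sketch (illustrative, not from the original file):
#
#     from transformers import Wav2Vec2Config, Wav2Vec2Model
#     config = Wav2Vec2Config()        # the defaults above
#     model = Wav2Vec2Model(config)
#     config.inputs_to_logits_ratio    # 320 == 5 * 2**6, the product of conv_stride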
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar('''T''')
class LRUCache(Generic[T] ):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__( self , n: int ):
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('''n should be an integer greater than 0.''' )
        else:
            LRUCache._MAX_CAPACITY = n

    def refer( self , x: T ):
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )

    def display( self ):
        for k in self.dq_store:
            print(k )

    def __repr__( self ):
        return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_mobilebert''': [
'''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileBertConfig''',
'''MobileBertOnnxConfig''',
],
'''tokenization_mobilebert''': ['''MobileBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ['''MobileBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
'''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileBertForMaskedLM''',
'''MobileBertForMultipleChoice''',
'''MobileBertForNextSentencePrediction''',
'''MobileBertForPreTraining''',
'''MobileBertForQuestionAnswering''',
'''MobileBertForSequenceClassification''',
'''MobileBertForTokenClassification''',
'''MobileBertLayer''',
'''MobileBertModel''',
'''MobileBertPreTrainedModel''',
'''load_tf_weights_in_mobilebert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
'''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileBertForMaskedLM''',
'''TFMobileBertForMultipleChoice''',
'''TFMobileBertForNextSentencePrediction''',
'''TFMobileBertForPreTraining''',
'''TFMobileBertForQuestionAnswering''',
'''TFMobileBertForSequenceClassification''',
'''TFMobileBertForTokenClassification''',
'''TFMobileBertMainLayer''',
'''TFMobileBertModel''',
'''TFMobileBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
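# With this pattern, `import ...mobilebert` stays cheap: _LazyModule records
# _import_structure and only imports the heavy torch/TF submodules when an
# attribute such as MobileBertModel is first accessed.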
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    """Build a VideoMAEConfig from the checkpoint name."""
    config = VideoMAEConfig()

    set_architecture_configs(model_name , config )

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = """huggingface/label-files"""
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = """kinetics400-id2label.json"""
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = """something-something-v2-id2label.json"""
        else:
            raise ValueError("""Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.""" )
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if "small" in model_name:
A_ : int = 3_84
A_ : Union[str, Any] = 15_36
A_ : List[str] = 12
A_ : Optional[int] = 16
A_ : Any = 12
A_ : int = 3
A_ : Optional[Any] = 1_92
A_ : Union[str, Any] = 7_68
elif "large" in model_name:
A_ : List[Any] = 10_24
A_ : Optional[Any] = 40_96
A_ : Optional[Any] = 24
A_ : List[str] = 16
A_ : Any = 12
A_ : str = 8
A_ : str = 5_12
A_ : int = 20_48
elif "huge" in model_name:
A_ : Optional[Any] = 12_80
A_ : str = 51_20
A_ : str = 32
A_ : int = 16
A_ : Any = 12
A_ : Union[str, Any] = 8
A_ : Dict = 6_40
A_ : Optional[Any] = 25_60
elif "base" not in model_name:
raise ValueError("""Model name should include either \"small\", \"base\", \"large\", or \"huge\"""" )
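# Variant summary of the branches above: "small" uses a 384-dim, 12-layer
# encoder with a 192-dim decoder; "large" a 1024-dim, 24-layer encoder with a
# 512-dim decoder; "huge" a 1280-dim, 32-layer encoder with a 640-dim decoder.
# A plain "base" name keeps the VideoMAEConfig defaults.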
def a ( lowerCamelCase__ ):
'''simple docstring'''
if "encoder." in name:
A_ : List[Any] = name.replace("""encoder.""" , """""" )
if "cls_token" in name:
A_ : List[str] = name.replace("""cls_token""" , """videomae.embeddings.cls_token""" )
if "decoder_pos_embed" in name:
A_ : Tuple = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
A_ : int = name.replace("""pos_embed""" , """videomae.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
A_ : Optional[Any] = name.replace("""patch_embed.proj""" , """videomae.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
A_ : Dict = name.replace("""patch_embed.norm""" , """videomae.embeddings.norm""" )
if "decoder.blocks" in name:
A_ : List[str] = name.replace("""decoder.blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
A_ : List[str] = name.replace("""blocks""" , """videomae.encoder.layer""" )
if "attn.proj" in name:
A_ : str = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "bias" not in name:
A_ : str = name.replace("""attn""" , """attention.self""" )
if "attn" in name:
A_ : Union[str, Any] = name.replace("""attn""" , """attention.attention""" )
if "norm1" in name:
A_ : Any = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
A_ : List[str] = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
A_ : Dict = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
A_ : List[str] = name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
A_ : Optional[Any] = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
A_ : Tuple = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
A_ : Tuple = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
A_ : Dict = name.replace("""norm.weight""" , """videomae.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
A_ : List[str] = name.replace("""norm.bias""" , """videomae.layernorm.bias""" )
if "head" in name and "decoder" not in name:
A_ : Optional[Any] = name.replace("""head""" , """classifier""" )
return name
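# Worked example of the substitution chain above (the input key is illustrative):
#   "encoder.blocks.0.attn.proj.weight"
#   -> "blocks.0.attn.proj.weight"                                strip "encoder."
#   -> "videomae.encoder.layer.0.attn.proj.weight"                "blocks" rule
#   -> "videomae.encoder.layer.0.attention.output.dense.weight"   "attn.proj" rule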
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
A_ : str = orig_state_dict.pop(lowerCamelCase__ )
if key.startswith("""encoder.""" ):
A_ : Tuple = key.replace("""encoder.""" , """""" )
if "qkv" in key:
A_ : Optional[int] = key.split(""".""" )
if key.startswith("""decoder.blocks""" ):
A_ : Union[str, Any] = config.decoder_hidden_size
A_ : Any = int(key_split[2] )
A_ : int = """decoder.decoder_layers."""
if "weight" in key:
A_ : Optional[Any] = val[:dim, :]
A_ : Any = val[dim : dim * 2, :]
A_ : Dict = val[-dim:, :]
else:
A_ : List[Any] = config.hidden_size
A_ : List[Any] = int(key_split[1] )
A_ : int = """videomae.encoder.layer."""
if "weight" in key:
A_ : Any = val[:dim, :]
A_ : Union[str, Any] = val[dim : dim * 2, :]
A_ : List[str] = val[-dim:, :]
else:
A_ : Union[str, Any] = val
return orig_state_dict
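# Note on the "qkv" branch above: the original checkpoint stores query/key/value
# as one fused projection of shape (3 * dim, dim), so slicing val[:dim],
# val[dim : dim * 2] and val[-dim:] recovers the separate q, k and v weights.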
def a ( ):
'''simple docstring'''
A_ : List[Any] = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
A_ : Optional[Any] = np.load(lowerCamelCase__ )
return list(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Any = get_videomae_config(lowerCamelCase__ )
if "finetuned" in model_name:
A_ : List[str] = VideoMAEForVideoClassification(lowerCamelCase__ )
else:
A_ : Optional[Any] = VideoMAEForPreTraining(lowerCamelCase__ )
# download original checkpoint, hosted on Google Drive
A_ : Optional[Any] = """pytorch_model.bin"""
gdown.cached_download(lowerCamelCase__ , lowerCamelCase__ , quiet=lowerCamelCase__ )
A_ : Any = torch.load(lowerCamelCase__ , map_location="""cpu""" )
if "model" in files:
A_ : Any = files["""model"""]
else:
A_ : Dict = files["""module"""]
A_ : Any = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
model.eval()
# verify model on basic input
A_ : int = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
A_ : Union[str, Any] = prepare_video()
A_ : str = image_processor(lowerCamelCase__ , return_tensors="""pt""" )
if "finetuned" not in model_name:
A_ : List[str] = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
A_ : Optional[Any] = torch.load(lowerCamelCase__ )
A_ : Dict = model(**lowerCamelCase__ )
A_ : List[Any] = outputs.logits
A_ : Any = [
"""videomae-small-finetuned-kinetics""",
"""videomae-small-finetuned-ssv2""",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"""videomae-base-short""",
"""videomae-base-short-finetuned-kinetics""",
"""videomae-base""",
"""videomae-base-finetuned-kinetics""",
"""videomae-large""",
"""videomae-large-finetuned-kinetics""",
"""videomae-huge-finetuned-kinetics""",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"""videomae-base-short-ssv2""",
"""videomae-base-short-finetuned-ssv2""",
"""videomae-base-ssv2""",
"""videomae-base-finetuned-ssv2""",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
A_ : str = torch.Size([1, 4_00] )
A_ : Optional[Any] = torch.tensor([-0.9_291, -0.4_061, -0.9_307] )
elif model_name == "videomae-small-finetuned-ssv2":
A_ : str = torch.Size([1, 1_74] )
A_ : Union[str, Any] = torch.tensor([0.2_671, -0.4_689, -0.8_235] )
elif model_name == "videomae-base":
A_ : Tuple = torch.Size([1, 14_08, 15_36] )
A_ : List[str] = torch.tensor([[0.7_739, 0.7_968, 0.7_089], [0.6_701, 0.7_487, 0.6_209], [0.4_287, 0.5_158, 0.4_773]] )
elif model_name == "videomae-base-short":
A_ : Dict = torch.Size([1, 14_08, 15_36] )
A_ : List[str] = torch.tensor([[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] )
# we verified the loss both for normalized and unnormalized targets for this one
A_ : List[Any] = torch.tensor([0.5_142] ) if config.norm_pix_loss else torch.tensor([0.6_469] )
elif model_name == "videomae-large":
A_ : str = torch.Size([1, 14_08, 15_36] )
A_ : Dict = torch.tensor([[0.7_149, 0.7_997, 0.6_966], [0.6_768, 0.7_869, 0.6_948], [0.5_139, 0.6_221, 0.5_605]] )
elif model_name == "videomae-large-finetuned-kinetics":
A_ : int = torch.Size([1, 4_00] )
A_ : Optional[Any] = torch.tensor([0.0_771, 0.0_011, -0.3_625] )
elif model_name == "videomae-huge-finetuned-kinetics":
A_ : Union[str, Any] = torch.Size([1, 4_00] )
A_ : Optional[int] = torch.tensor([0.2_433, 0.1_632, -0.4_894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
A_ : List[Any] = torch.Size([1, 4_00] )
A_ : Optional[Any] = torch.tensor([0.6_588, 0.0_990, -0.2_493] )
elif model_name == "videomae-base-finetuned-kinetics":
A_ : Union[str, Any] = torch.Size([1, 4_00] )
A_ : Tuple = torch.tensor([0.3_669, -0.0_688, -0.2_421] )
elif model_name == "videomae-base-short-ssv2":
A_ : Optional[Any] = torch.Size([1, 14_08, 15_36] )
A_ : List[Any] = torch.tensor([[0.4_712, 0.5_296, 0.5_786], [0.2_278, 0.2_729, 0.4_026], [0.0_352, 0.0_730, 0.2_506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
A_ : Any = torch.Size([1, 1_74] )
A_ : Any = torch.tensor([-0.0_537, -0.1_539, -0.3_266] )
elif model_name == "videomae-base-ssv2":
A_ : Dict = torch.Size([1, 14_08, 15_36] )
A_ : Dict = torch.tensor([[0.8_131, 0.8_727, 0.8_546], [0.7_366, 0.9_377, 0.8_870], [0.5_935, 0.8_874, 0.8_564]] )
elif model_name == "videomae-base-finetuned-ssv2":
A_ : Any = torch.Size([1, 1_74] )
A_ : str = torch.tensor([0.1_961, -0.8_337, -0.6_389] )
else:
raise ValueError(f'Model name not supported. Should be one of {model_names}' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 )
else:
print("""Logits:""" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , lowerCamelCase__ , atol=1E-4 )
print("""Logits ok!""" )
# verify loss, if applicable
if model_name == "videomae-base-short":
A_ : Optional[int] = outputs.loss
assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-4 )
print("""Loss ok!""" )
if pytorch_dump_folder_path is not None:
print(f'Saving model and image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
if push_to_hub:
print("""Pushing to the hub...""" )
model.push_to_hub(lowerCamelCase__ , organization="""nielsr""" )
if __name__ == "__main__":
lowerCamelCase :Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4''',
type=str,
help=(
'''URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'''
''' download link.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/Users/nielsrogge/Documents/VideoMAE/Test''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--model_name''', default='''videomae-base''', type=str, help='''Name of the model.''')
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase :Union[str, Any] = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
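# Example invocation (script name and paths are illustrative; the URL must be a
# direct Google Drive download link, as the --checkpoint_url help notes):
#   python convert_videomae_to_pytorch.py \
#       --checkpoint_url "<drive-direct-download-url>" \
#       --pytorch_dump_folder_path ./videomae-base \
#       --model_name videomae-base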
| 206
| 0
|
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
lowercase_ = re.compile(R"""^(?P<major>\d+)""" R"""\.(?P<minor>\d+)""" R"""\.(?P<patch>\d+)$""")
@total_ordering
@dataclass
class SCREAMING_SNAKE_CASE :
_UpperCamelCase : str
_UpperCamelCase : Optional[str] = None
_UpperCamelCase : Optional[Union[str, int]] = None
_UpperCamelCase : Optional[Union[str, int]] = None
_UpperCamelCase : Optional[Union[str, int]] = None
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> List[str]:
"""simple docstring"""
lowercase__ = _str_to_version_tuple(self.version_str )
def __repr__( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
return f"""{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"""
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Dict:
"""simple docstring"""
return self.major, self.minor, self.patch
    def _validate_operand( self , other ):
        """Coerce a version string to Version; pass Version instances through."""
        if isinstance(other , str ):
            return Version(other )
        elif isinstance(other , Version ):
            return other
        raise TypeError(f"""{other} (type {type(other )}) cannot be compared to version.""" )
def __eq__( self : List[str] , a : int )-> Dict:
"""simple docstring"""
try:
lowercase__ = self._validate_operand(_UpperCAmelCase )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self : str , a : Optional[int] )-> List[Any]:
"""simple docstring"""
lowercase__ = self._validate_operand(_UpperCAmelCase )
return self.tuple < other.tuple
def __hash__( self : List[str] )-> Optional[int]:
"""simple docstring"""
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any , a : int )-> List[Any]:
"""simple docstring"""
lowercase__ = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
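    # Illustrative call, assuming the class is exported as `Version`:
    # Version.from_dict({"version_str": "1.0.0", "extra": 1}) silently drops
    # "extra", keeping only keys that match declared dataclass fields.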
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> int:
"""simple docstring"""
return self.version_str
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Dict:
lowercase__ = _VERSION_REG.match(lowerCAmelCase_ )
if not res:
raise ValueError(F"""Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.""" )
    return tuple(int(v ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Tuple:
return ".".join(str(lowerCAmelCase_ ) for v in version_tuple )
| 369
|
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowercase_ = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE :
_UpperCamelCase : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
_UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
_UpperCamelCase : Optional[str] = field(
default='NER' , metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'} )
_UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
_UpperCamelCase : bool = field(default=UpperCAmelCase , metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class SCREAMING_SNAKE_CASE :
_UpperCamelCase : str = field(
metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'} )
_UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'} , )
_UpperCamelCase : int = field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
_UpperCamelCase : bool = field(
default=UpperCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def __UpperCamelCase () -> str:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__ , lowercase__ , lowercase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__ , lowercase__ , lowercase__ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
' --overwrite_output_dir to overcome.' )
lowercase__ = import_module('tasks' )
try:
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , model_args.task_type )
lowercase__ = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
    # Prepare the CoNLL-2003 task
lowercase__ = token_classification_task.get_labels(data_args.labels )
lowercase__ = dict(enumerate(_SCREAMING_SNAKE_CASE ) )
lowercase__ = len(_SCREAMING_SNAKE_CASE )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_SCREAMING_SNAKE_CASE , idalabel=_SCREAMING_SNAKE_CASE , labelaid={label: i for i, label in enumerate(_SCREAMING_SNAKE_CASE )} , cache_dir=model_args.cache_dir , )
lowercase__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
lowercase__ = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , )
# Get datasets
lowercase__ = (
TokenClassificationDataset(
token_classification_task=_SCREAMING_SNAKE_CASE , data_dir=data_args.data_dir , tokenizer=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
lowercase__ = (
TokenClassificationDataset(
token_classification_task=_SCREAMING_SNAKE_CASE , data_dir=data_args.data_dir , tokenizer=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple[List[int], List[int]]:
lowercase__ = np.argmax(_SCREAMING_SNAKE_CASE , axis=2 )
lowercase__ , lowercase__ = preds.shape
lowercase__ = [[] for _ in range(_SCREAMING_SNAKE_CASE )]
lowercase__ = [[] for _ in range(_SCREAMING_SNAKE_CASE )]
for i in range(_SCREAMING_SNAKE_CASE ):
for j in range(_SCREAMING_SNAKE_CASE ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
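    # align_predictions skips positions whose gold label equals
    # nn.CrossEntropyLoss().ignore_index (-100), so padding and, typically,
    # non-initial word pieces never reach the seqeval metrics below.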
def compute_metrics(_SCREAMING_SNAKE_CASE ) -> Dict:
lowercase__ , lowercase__ = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ),
"precision": precision_score(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ),
"recall": recall_score(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ),
"f1": fa_score(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ),
}
# Data collator
lowercase__ = DataCollatorWithPadding(_SCREAMING_SNAKE_CASE , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
lowercase__ = Trainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=_SCREAMING_SNAKE_CASE , eval_dataset=_SCREAMING_SNAKE_CASE , compute_metrics=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowercase__ = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
lowercase__ = trainer.evaluate()
lowercase__ = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_process_zero():
with open(_SCREAMING_SNAKE_CASE , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
writer.write('%s = %s\n' % (key, value) )
results.update(_SCREAMING_SNAKE_CASE )
# Predict
if training_args.do_predict:
lowercase__ = TokenClassificationDataset(
token_classification_task=_SCREAMING_SNAKE_CASE , data_dir=data_args.data_dir , tokenizer=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
lowercase__ , lowercase__ , lowercase__ = trainer.predict(_SCREAMING_SNAKE_CASE )
lowercase__ , lowercase__ = align_predictions(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowercase__ = os.path.join(training_args.output_dir , 'test_results.txt' )
if trainer.is_world_process_zero():
with open(_SCREAMING_SNAKE_CASE , 'w' ) as writer:
for key, value in metrics.items():
logger.info(' %s = %s' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
writer.write('%s = %s\n' % (key, value) )
# Save predictions
lowercase__ = os.path.join(training_args.output_dir , 'test_predictions.txt' )
if trainer.is_world_process_zero():
with open(_SCREAMING_SNAKE_CASE , 'w' ) as writer:
with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
token_classification_task.write_predictions_to_file(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return results
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Any:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
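# Example invocation (script name and paths are illustrative; expects
# CoNLL-2003-formatted .txt files in --data_dir):
#   python run_ner.py --model_name_or_path bert-base-cased --data_dir ./conll2003 \
#       --task_type NER --output_dir ./ner-out --do_train --do_eval --do_predict
# A single path/to/args.json argument holding the same fields also works.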
| 269
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
UpperCAmelCase = None
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase = {
'''vocab_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/spiece.model''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json''',
},
}
UpperCAmelCase = {
'''google/fnet-base''': 512,
'''google/fnet-large''': 512,
}
UpperCAmelCase = '''▁'''
class A_ ( __lowerCamelCase ):
'''simple docstring'''
_UpperCamelCase : List[str] = VOCAB_FILES_NAMES
_UpperCamelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[Any] = ["""input_ids""", """token_type_ids"""]
_UpperCamelCase : List[str] = FNetTokenizer
def __init__( self , snake_case=None , snake_case=None , snake_case=False , snake_case=True , snake_case=True , snake_case="<unk>" , snake_case="[SEP]" , snake_case="<pad>" , snake_case="[CLS]" , snake_case="[MASK]" , **snake_case , ):
        # The mask token behaves like a normal word, i.e. it includes the space before it
        # and is included in the raw text, so there should be a match in a non-normalized sentence.
lowercase = (
AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case , normalized=snake_case )
if isinstance(snake_case , snake_case )
else mask_token
)
super().__init__(
snake_case , tokenizer_file=snake_case , do_lower_case=snake_case , remove_space=snake_case , keep_accents=snake_case , unk_token=snake_case , sep_token=snake_case , pad_token=snake_case , cls_token=snake_case , mask_token=snake_case , **snake_case , )
lowercase = do_lower_case
lowercase = remove_space
lowercase = keep_accents
lowercase = vocab_file
lowercase = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ):
lowercase = [self.sep_token_id]
lowercase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ):
lowercase = [self.sep_token_id]
lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
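    # Layout produced by the two helpers above (schematic):
    #   single sequence: [CLS] A [SEP]          -> token_type_ids all 0
    #   sequence pair:   [CLS] A [SEP] B [SEP]  -> 0s over "[CLS] A [SEP]", 1s over "B [SEP]"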
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ):
if not os.path.isdir(snake_case ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase = os.path.join(
snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ):
copyfile(self.vocab_file , snake_case )
return (out_vocab_file,)
| 195
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class A_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = tempfile.mkdtemp()
lowercase = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'的',
'价',
'格',
'是',
'15',
'便',
'alex',
'##andra',
',',
'。',
'-',
't',
'shirt',
]
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
lowercase = {
'do_resize': True,
'size': {'height': 224, 'width': 224},
'do_center_crop': True,
'crop_size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.48_145_466, 0.4_578_275, 0.40_821_073],
'image_std': [0.26_862_954, 0.26_130_258, 0.27_577_711],
'do_convert_rgb': True,
}
lowercase = os.path.join(self.tmpdirname , snake_case )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( self , **snake_case ):
return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , **snake_case ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self , **snake_case ):
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase = [Image.fromarray(np.moveaxis(snake_case , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.get_tokenizer()
lowercase = self.get_rust_tokenizer()
lowercase = self.get_image_processor()
lowercase = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case )
processor_slow.save_pretrained(self.tmpdirname )
lowercase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case )
lowercase = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case )
processor_fast.save_pretrained(self.tmpdirname )
lowercase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , snake_case )
self.assertIsInstance(processor_fast.tokenizer , snake_case )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , snake_case )
self.assertIsInstance(processor_fast.image_processor , snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase = self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)' )
lowercase = self.get_image_processor(do_normalize=snake_case )
lowercase = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=snake_case )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case )
lowercase = self.prepare_image_inputs()
lowercase = image_processor(snake_case , return_tensors='np' )
lowercase = processor(images=snake_case , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case )
lowercase = 'Alexandra,T-shirt的价格是15便士。'
lowercase = processor(text=snake_case )
lowercase = tokenizer(snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case )
lowercase = 'Alexandra,T-shirt的价格是15便士。'
lowercase = self.prepare_image_inputs()
lowercase = processor(text=snake_case , images=snake_case )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(snake_case ):
processor()
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case )
lowercase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase = processor.batch_decode(snake_case )
lowercase = tokenizer.batch_decode(snake_case )
self.assertListEqual(snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = self.get_image_processor()
lowercase = self.get_tokenizer()
lowercase = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case )
lowercase = 'Alexandra,T-shirt的价格是15便士。'
lowercase = self.prepare_image_inputs()
lowercase = processor(text=snake_case , images=snake_case )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
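# What the tests above pin down: the processor routes `text=` to the wrapped
# tokenizer and `images=` to the image processor and merges both outputs, e.g.
# (hypothetical inputs):
#   inputs = processor(text="一只猫", images=image)
#   # -> keys: input_ids, token_type_ids, attention_mask, pixel_values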
| 195
| 1
|
"""simple docstring"""
import numpy as np
def runge_kutta(f, ya, xa, x_end, h):
    """Integrate y' = f(x, y) on [xa, x_end] with step h via classical RK4; returns the y samples."""
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
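# The loop above implements the classical fourth-order Runge-Kutta update:
#   k1 = f(x_n, y_n)
#   k2 = f(x_n + h/2, y_n + (h/2) * k1)
#   k3 = f(x_n + h/2, y_n + (h/2) * k2)
#   k4 = f(x_n + h,   y_n + h * k3)
#   y_{n+1} = y_n + (h/6) * (k1 + 2*k2 + 2*k3 + k4)
# with O(h^5) local and O(h^4) global truncation error.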
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class _a :
def __init__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=10, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=32 * 8, SCREAMING_SNAKE_CASE_=32 * 8, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=64, ) -> Union[str, Any]:
UpperCAmelCase_: int = parent
UpperCAmelCase_: Tuple = batch_size
UpperCAmelCase_: int = is_training
UpperCAmelCase_: Any = use_auxiliary_loss
UpperCAmelCase_: str = num_queries
UpperCAmelCase_: List[Any] = num_channels
UpperCAmelCase_: Union[str, Any] = min_size
UpperCAmelCase_: Optional[Any] = max_size
UpperCAmelCase_: Tuple = num_labels
UpperCAmelCase_: Union[str, Any] = hidden_dim
UpperCAmelCase_: int = hidden_dim
def __snake_case (self ) -> Tuple:
UpperCAmelCase_: List[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[str] = torch.ones([self.batch_size, self.min_size, self.max_size], device=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=SCREAMING_SNAKE_CASE_ ) > 0.5
).float()
UpperCAmelCase_: Optional[int] = (torch.rand((self.batch_size, self.num_labels), device=SCREAMING_SNAKE_CASE_ ) > 0.5).long()
UpperCAmelCase_: Union[str, Any] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __snake_case (self ) -> Any:
UpperCAmelCase_: Any = MaskaFormerConfig(
hidden_size=self.hidden_dim, )
UpperCAmelCase_: Any = self.num_queries
UpperCAmelCase_: Dict = self.num_labels
UpperCAmelCase_: Dict = [1, 1, 1, 1]
UpperCAmelCase_: int = self.num_channels
UpperCAmelCase_: Union[str, Any] = 64
UpperCAmelCase_: List[Any] = 128
UpperCAmelCase_: Optional[Any] = self.hidden_dim
UpperCAmelCase_: str = self.hidden_dim
UpperCAmelCase_: List[str] = self.hidden_dim
return config
def __snake_case (self ) -> Union[str, Any]:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: Dict = self.prepare_config_and_inputs()
UpperCAmelCase_: Any = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCAmelCase_: Union[str, Any] = output.encoder_hidden_states
UpperCAmelCase_: int = output.pixel_decoder_hidden_states
UpperCAmelCase_: Any = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ), len(config.backbone_config.depths ) )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ), len(config.backbone_config.depths ) )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ), config.decoder_layers )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=False ) -> Optional[Any]:
with torch.no_grad():
UpperCAmelCase_: Dict = MaskaFormerModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase_: List[Any] = model(pixel_values=SCREAMING_SNAKE_CASE_, pixel_mask=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: str = model(SCREAMING_SNAKE_CASE_, output_hidden_states=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_dim), )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCAmelCase_: Tuple = MaskaFormerForUniversalSegmentation(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
def comm_check_on_output(SCREAMING_SNAKE_CASE_ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCAmelCase_: Dict = model(pixel_values=SCREAMING_SNAKE_CASE_, pixel_mask=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: int = model(SCREAMING_SNAKE_CASE_ )
comm_check_on_output(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Dict = model(
pixel_values=SCREAMING_SNAKE_CASE_, pixel_mask=SCREAMING_SNAKE_CASE_, mask_labels=SCREAMING_SNAKE_CASE_, class_labels=SCREAMING_SNAKE_CASE_ )
comm_check_on_output(SCREAMING_SNAKE_CASE_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape, torch.Size([1] ) )
@require_torch
class _a ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
A = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
A = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
A = False
A = False
A = False
A = False
def __snake_case (self ) -> Any:
UpperCAmelCase_: List[str] = MaskaFormerModelTester(self )
UpperCAmelCase_: Any = ConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, has_text_modality=SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> List[Any]:
self.config_tester.run_common_tests()
def __snake_case (self ) -> Optional[Any]:
UpperCAmelCase_ , UpperCAmelCase_: Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, output_hidden_states=SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Union[str, Any]:
UpperCAmelCase_: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*SCREAMING_SNAKE_CASE_ )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def __snake_case (self ) -> Dict:
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def __snake_case (self ) -> Optional[int]:
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def __snake_case (self ) -> List[str]:
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def __snake_case (self ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def __snake_case (self ) -> List[str]:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __snake_case (self ) -> Dict:
pass
def __snake_case (self ) -> Any:
UpperCAmelCase_ , UpperCAmelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_: Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_: Tuple = [*signature.parameters.keys()]
UpperCAmelCase_: str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_ )
@slow
def __snake_case (self ) -> List[Any]:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
UpperCAmelCase_: Any = MaskaFormerModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Union[str, Any]:
UpperCAmelCase_: str = (self.model_tester.min_size,) * 2
UpperCAmelCase_: str = {
"""pixel_values""": torch.randn((2, 3, *size), device=SCREAMING_SNAKE_CASE_ ),
"""mask_labels""": torch.randn((2, 10, *size), device=SCREAMING_SNAKE_CASE_ ),
"""class_labels""": torch.zeros(2, 10, device=SCREAMING_SNAKE_CASE_ ).long(),
}
UpperCAmelCase_: Dict = self.model_tester.get_config()
UpperCAmelCase_: Optional[Any] = MaskaFormerForUniversalSegmentation(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[Any] = model(**SCREAMING_SNAKE_CASE_ )
self.assertTrue(outputs.loss is not None )
def __snake_case (self ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_: Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, output_hidden_states=SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_: List[Any] = model_class(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Any = model(**SCREAMING_SNAKE_CASE_, output_attentions=SCREAMING_SNAKE_CASE_ )
self.assertTrue(outputs.attentions is not None )
def __snake_case (self ) -> Optional[int]:
if not self.model_tester.is_training:
return
UpperCAmelCase_: Union[str, Any] = self.all_model_classes[1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: Any = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase_: Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
UpperCAmelCase_: Optional[int] = model(SCREAMING_SNAKE_CASE_, mask_labels=SCREAMING_SNAKE_CASE_, class_labels=SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def __snake_case (self ) -> Optional[Any]:
UpperCAmelCase_: Any = self.all_model_classes[1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: List[Any] = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase_: Union[str, Any] = True
UpperCAmelCase_: str = True
UpperCAmelCase_: Optional[int] = model_class(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
model.train()
UpperCAmelCase_: Union[str, Any] = model(SCREAMING_SNAKE_CASE_, mask_labels=SCREAMING_SNAKE_CASE_, class_labels=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCAmelCase_: Union[str, Any] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
UpperCAmelCase_: Optional[int] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCAmelCase_: Optional[Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
a : int = 1E-4
def lowerCAmelCase_ ():
"""simple docstring"""
UpperCAmelCase_: str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class _a ( unittest.TestCase ):
@cached_property
def __snake_case (self ) -> Optional[int]:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def __snake_case (self ) -> Dict:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def __snake_case (self ) -> List[str]:
UpperCAmelCase_: int = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[int] = self.default_image_processor
UpperCAmelCase_: Optional[Any] = prepare_img()
UpperCAmelCase_: str = image_processor(SCREAMING_SNAKE_CASE_, return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(SCREAMING_SNAKE_CASE_, (1, 3, 384, 384) )
with torch.no_grad():
UpperCAmelCase_: Optional[int] = model(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Union[str, Any] = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3], SCREAMING_SNAKE_CASE_, atol=SCREAMING_SNAKE_CASE_ ) )
UpperCAmelCase_: Dict = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], SCREAMING_SNAKE_CASE_, atol=SCREAMING_SNAKE_CASE_ ) )
UpperCAmelCase_: str = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3], SCREAMING_SNAKE_CASE_, atol=SCREAMING_SNAKE_CASE_ ) )
def __snake_case (self ) -> Optional[Any]:
UpperCAmelCase_: Any = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE_ ).eval()
UpperCAmelCase_: Tuple = self.default_image_processor
UpperCAmelCase_: Dict = prepare_img()
UpperCAmelCase_: Any = image_processor(SCREAMING_SNAKE_CASE_, return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(SCREAMING_SNAKE_CASE_, (1, 3, 384, 384) )
with torch.no_grad():
UpperCAmelCase_: int = model(**SCREAMING_SNAKE_CASE_ )
# masks_queries_logits
UpperCAmelCase_: int = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
UpperCAmelCase_: Optional[Any] = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
UpperCAmelCase_: int = torch.tensor(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], SCREAMING_SNAKE_CASE_, atol=SCREAMING_SNAKE_CASE_ ) )
# class_queries_logits
UpperCAmelCase_: Dict = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1) )
UpperCAmelCase_: Any = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], SCREAMING_SNAKE_CASE_, atol=SCREAMING_SNAKE_CASE_ ) )
def __snake_case (self ) -> Tuple:
UpperCAmelCase_: List[str] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE_ ).eval()
UpperCAmelCase_: Dict = self.default_image_processor
UpperCAmelCase_: str = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )], segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )], return_tensors="""pt""", )
UpperCAmelCase_: int = inputs["""pixel_values"""].to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[int] = [el.to(SCREAMING_SNAKE_CASE_ ) for el in inputs["""mask_labels"""]]
UpperCAmelCase_: int = [el.to(SCREAMING_SNAKE_CASE_ ) for el in inputs["""class_labels"""]]
with torch.no_grad():
UpperCAmelCase_: Union[str, Any] = model(**SCREAMING_SNAKE_CASE_ )
self.assertTrue(outputs.loss is not None )
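# Recap of the inference checks above: masks_queries_logits is predicted at 1/4
# of the input resolution (384 -> 96 per side here), and class_queries_logits
# has num_labels + 1 columns, the extra column being the null ("no object") class.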
| 82
| 0
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a_ :
"""simple docstring"""
def __init__( self : List[Any] ,snake_case : Any ,snake_case : List[str]=3 ,snake_case : str=32 ,snake_case : Tuple=3 ,snake_case : Dict=10 ,snake_case : List[Any]=[10, 20, 30, 40] ,snake_case : List[Any]=[1, 1, 2, 1] ,snake_case : Any=True ,snake_case : Dict=True ,snake_case : Any="relu" ,snake_case : Union[str, Any]=3 ,snake_case : List[str]=None ,):
SCREAMING_SNAKE_CASE =parent
SCREAMING_SNAKE_CASE =batch_size
SCREAMING_SNAKE_CASE =image_size
SCREAMING_SNAKE_CASE =num_channels
SCREAMING_SNAKE_CASE =embeddings_size
SCREAMING_SNAKE_CASE =hidden_sizes
SCREAMING_SNAKE_CASE =depths
SCREAMING_SNAKE_CASE =is_training
SCREAMING_SNAKE_CASE =use_labels
SCREAMING_SNAKE_CASE =hidden_act
SCREAMING_SNAKE_CASE =num_labels
SCREAMING_SNAKE_CASE =scope
SCREAMING_SNAKE_CASE =len(__snake_case )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE =None
if self.use_labels:
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.num_labels )
SCREAMING_SNAKE_CASE =self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase ( self : int ):
return ResNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,image_size=self.image_size ,)
def _lowerCAmelCase ( self : Optional[int] ,snake_case : Union[str, Any] ,snake_case : List[Any] ,snake_case : List[Any] ):
SCREAMING_SNAKE_CASE =TFResNetModel(config=__snake_case )
SCREAMING_SNAKE_CASE =model(__snake_case )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def _lowerCAmelCase ( self : List[str] ,snake_case : Optional[int] ,snake_case : Union[str, Any] ,snake_case : str ):
SCREAMING_SNAKE_CASE =self.num_labels
SCREAMING_SNAKE_CASE =TFResNetForImageClassification(__snake_case )
SCREAMING_SNAKE_CASE =model(__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self : str ):
SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE =config_and_inputs
SCREAMING_SNAKE_CASE ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class a_ ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
__UpperCAmelCase = (
{'feature-extraction': TFResNetModel, 'image-classification': TFResNetForImageClassification}
if is_tf_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =TFResNetModelTester(self )
SCREAMING_SNAKE_CASE =ConfigTester(self ,config_class=__snake_case ,has_text_modality=__snake_case )
def _lowerCAmelCase ( self : List[str] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCAmelCase ( self : Dict ):
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def _lowerCAmelCase ( self : str ):
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def _lowerCAmelCase ( self : Dict ):
pass
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE =model_class(__snake_case )
SCREAMING_SNAKE_CASE =inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE =['''pixel_values''']
self.assertListEqual(arg_names[:1] ,__snake_case )
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Loads the fixture image used by the slow integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
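
# --- Illustrative usage sketch (added; not part of the test suite). ---
# A minimal inference example, assuming network access and that
# "microsoft/resnet-50" is the checkpoint behind TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]:
# from transformers import AutoImageProcessor, TFResNetForImageClassification
# from PIL import Image
#
# processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
# model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")
# inputs = processor(images=Image.open("cat.png"), return_tensors="tf")
# logits = model(**inputs).logits
# print(model.config.id2label[int(tf.math.argmax(logits, axis=-1)[0])])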
| 334
|
'''simple docstring'''
from manim import *
class Stage2(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        cpu_targs = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(cpu_targs[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            cpu_targs.append(cpu_target)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)
        checkpoint.move_to([3, 0.5, 0])

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())

        step_2 = MarkupText(
            f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.',
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2), Write(blue_text))
        self.play(Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1))

        first_animations = []
        second_animations = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            first_animations.append(GrowFromCenter(target, run_time=1))

            cpu_target = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5])
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
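
# --- Illustrative usage note (added). ---
# A scene like the one above is rendered from the command line, e.g. (assuming
# the file is saved as stage_2.py; "Stage2" is the restored class name above):
#     manim -pql stage_2.py Stage2
# -p previews the result, -ql renders at low quality for fast iteration.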
| 23
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
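
# --- Illustrative usage sketch (added): round-tripping the config through JSON.
if __name__ == "__main__":
    config = CTRLConfig(n_layer=2, n_head=4)
    # attribute_map resolves the generic name to the CTRL-specific one:
    print(config.num_hidden_layers)  # -> 2 (backed by n_layer)
    config.save_pretrained("./ctrl-tiny")  # writes ./ctrl-tiny/config.json
    reloaded = CTRLConfig.from_pretrained("./ctrl-tiny")
    assert reloaded.n_layer == 2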
| 203
|
"""simple docstring"""
import math
def res(x: int, y: int) -> float:
    if 0 not in (x, y):
        # We use the identity log10(x^y) = y * log10(x) so we can compare
        # magnitudes without computing the (possibly huge) powers themselves.
        return y * math.log10(x)
    if x == 0:  # 0 raised to any positive power is 0
        return 0
    if y == 0:
        return 1  # any non-zero number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
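
# --- Worked example (added). ---
# res(2, 10) == 10 * log10(2) ≈ 3.0103 and res(3, 7) == 7 * log10(3) ≈ 3.3398,
# so for inputs "2,10" and "3,7" the script reports 3 ^ 7 (= 2187) as larger
# than 2 ^ 10 (= 1024).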
| 203
| 1
|
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        return super().__call__(audios, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework is not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
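
# --- Illustrative usage sketch (added). ---
# Hypothetical invocation through the pipeline factory; the checkpoint name is
# an assumption (any CLAP-style zero-shot audio model would work):
# from transformers import pipeline
# classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
# preds = classifier("dog_bark.wav", candidate_labels=["dog barking", "vacuum cleaner"])
# print(preds)  # e.g. [{"score": 0.99, "label": "dog barking"}, ...]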
| 188
|
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
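
# --- Worked example (added), using the well-known SHA-1 test vector for b"abc":
# >>> SHA1Hash(b"abc").final_hash()
# 'a9993e364706816aba3e25717850c26c9cd0d89d'
# >>> hashlib.sha1(b"abc").hexdigest()
# 'a9993e364706816aba3e25717850c26c9cd0d89d'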
| 188
| 1
|
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")
    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler
    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits
    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined, at least the EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
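
# --- Illustrative sketch (added): what a fairseq-style `label_smoothed_nll_loss`
# like the one imported in `__init__` typically computes. This is an assumption
# about that helper, not the exact `utils` implementation used above.
def _label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index=-100):
    """lprobs: (N, vocab) log-probabilities; target: (N,) gold token ids.

    `ignore_index` should be a valid token id (e.g. pad_token_id), since it is
    gathered before being masked out.
    """
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)  # log-prob of the gold token
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)  # uniform smoothing term
    pad_mask = target.eq(ignore_index)
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0)
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0)
    nll_loss, smooth_loss = nll_loss.sum(), smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    return (1.0 - epsilon) * nll_loss + eps_i * smooth_loss, nll_loss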
| 357
|
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)
    @property
    def components(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
    @torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Check that the height and width are divisible by 8
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get one result per Stable Diffusion checkpoint (v1.1 - v1.4)
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Collect all result images into a single list for the final output
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
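
# --- Illustrative usage sketch (added). ---
# Hypothetical driver; loading four SD checkpoints needs substantial disk and
# VRAM, and "stable_diffusion_comparison" as a custom-pipeline name is an
# assumption:
# from diffusers import DiffusionPipeline
# pipe = DiffusionPipeline.from_pretrained(
#     "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
# )
# pipe.enable_attention_slicing()
# images = pipe(prompt="an astronaut riding a horse", num_inference_steps=25)[0]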
| 143
| 0
|
"""simple docstring"""
def infix_2_postfix(infix: str) -> str:
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix: str) -> str:
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 100
|
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        # Sum of currently allocated resources, per resource column
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        # Resources still available, per resource column
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        # Outstanding need of each process: maximum claim minus current allocation
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
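
# --- Illustrative usage sketch (added), using the test tables defined above:
# BankersAlgorithm(
#     test_claim_vector, test_allocated_res_table, test_maximum_claim_table
# ).main(describe=True)
# Any truthy keyword (here `describe=True`) makes main() print the resource
# tables before running the safety check.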
| 269
| 0
|
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_short_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
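
# --- Illustrative usage sketch (added). ---
# Hypothetical one-turn chat with the 90M checkpoint (requires a download):
# from transformers import BlenderbotSmallTokenizer, TFBlenderbotSmallForConditionalGeneration
# tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
# model = TFBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")
# ids = tok(["My friends are cool but they eat too many carbs."], return_tensors="tf")
# reply = model.generate(**ids)
# print(tok.batch_decode(reply, skip_special_tokens=True))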
| 325
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
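
# --- Illustrative note (added). ---
# With the lazy-module pattern above, importing the package is cheap; the heavy
# torch code is only imported on first attribute access, e.g. (module path assumed):
# import transformers.models.swiftformer as swiftformer   # no torch import yet
# model_cls = swiftformer.SwiftFormerModel                # triggers the real import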
| 325
| 1
|
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df: "pyspark.sql.DataFrame", partition_order: List[int]):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df, partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id, num_workers) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df, cache_dir=None, working_dir=None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )
    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(self, fpath, file_format, max_shard_size):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split( self , split_generator , file_format = "arrow" , max_shard_size = None , num_proc = None , **kwargs , ) -> Any:
        self._validate_cache_dir()
        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
        self._repartition_df_if_needed(max_shard_size )
        is_local = not is_remote_filesystem(self._fs )
        path_join = os.path.join if is_local else posixpath.join
        SUFFIX = """-TTTTT-SSSSS-of-NNNNN"""
        fname = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
        fpath = path_join(self._output_dir , fname )
        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []
        for task_id, content in self._prepare_split_single(fpath , file_format , max_shard_size ):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards) )
                all_shard_lengths.extend(shard_lengths )
        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes
        # should rename everything at the end
        logger.debug(f"""Renaming {total_shards} shards.""" )
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths
            # Define fs outside of _rename_shard so that we don't reference self in the function, which would result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs
            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id , shard_id , global_shard_id , ):
                rename(
                    fs , fpath.replace("""SSSSS""" , f"""{shard_id:05d}""" ).replace("""TTTTT""" , f"""{task_id:05d}""" ) , fpath.replace("""TTTTT-SSSSS""" , f"""{global_shard_id:05d}""" ).replace("""NNNNN""" , f"""{total_shards:05d}""" ) , )
            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards ) ):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards ):
                    args.append([task_id, shard_id, global_shard_id] )
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args , len(args ) ).map(lambda args : _rename_shard(*args ) ).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("""SSSSS""" , f"""{shard_id:05d}""" ).replace("""TTTTT""" , f"""{task_id:05d}""" ) , fpath.replace(SUFFIX , """""" ) , )
def __lowerCAmelCase ( self , __A , ) -> SparkExamplesIterable:
return SparkExamplesIterable(self.df )
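# A minimal sketch (illustrative names, not the library's API) of the pattern used
# in `_prepare_split_single` above: copy whatever you need from `self` into plain
# locals before defining the function shipped to executors, so the closure never
# captures `self` -- and with it the unpicklable SparkContext.
class _ClosureExample:
    def __init__(self, df, multiplier):
        self.df = df
        self.multiplier = multiplier

    def run(self):
        multiplier = self.multiplier  # local copy; the lambda below closes over a
        # plain int instead of `self`, so Spark can pickle and ship it to executors
        return self.df.rdd.map(lambda row: row[0] * multiplier).collect()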
| 84
|
from collections.abc import Iterable
from typing import Generic, TypeVar
A__ = TypeVar("""_T""")
class Queue( Generic[_T] ):
    def __init__( self , iterable = None ):
        """simple docstring"""
        self._stacka = list(iterable or [] )
        self._stackb = []
    def __len__( self ):
        """simple docstring"""
        return len(self._stacka ) + len(self._stackb )
    def __repr__( self ):
        """simple docstring"""
        return F'Queue({tuple(self._stackb[::-1] + self._stacka )})'
    def put( self , item ):
        """simple docstring"""
        self._stacka.append(item )
    def get( self ):
        """simple docstring"""
        # Bind the hot-path methods to locals to avoid repeated attribute lookups.
        stacka_pop = self._stacka.pop
        stackb_append = self._stackb.append
        if not self._stackb:
            while self._stacka:
                stackb_append(stacka_pop() )
        if not self._stackb:
            raise IndexError("""Queue is empty""" )
        return self._stackb.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
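    # Example: FIFO behavior of the two-stack Queue above. Each element moves from
    # the inbox stack to the outbox stack at most once, so `get` is amortized O(1)
    # even though a single call may shift many elements.
    fifo = Queue([1, 2, 3])
    fifo.put(4)
    assert fifo.get() == 1
    assert len(fifo) == 3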
| 82
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 79
|
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main() -> None:
    '''simple docstring'''
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args )
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e ).split(" " )[:-1] )
        full_error_msg = ""
        depreciated_args = eval(str(e ).split(" " )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
        raise ValueError(full_error_msg )
benchmark.run()
if __name__ == "__main__":
main()
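# Note: the `except ValueError` branch in `main` assumes argparse embeds the list
# of offending option strings at the end of the error message; `eval` turns that
# trailing "['--no_x', ...]" fragment back into a Python list. A safer, equivalent
# parse (a sketch, under the same assumption about the message format) would be:
#   import ast
#   depreciated_args = ast.literal_eval(str(e).split(" ")[-1])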
| 79
| 1
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_lengths=True , use_token_type_ids=True , use_labels=True , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=2 , vocab_size=99 , n_special=0 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=12 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , summary_type="last" , use_proj=None , scope=None , ) -> None:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs( self ) -> Any:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            is_impossible_labels = ids_tensor([self.batch_size] , 2 ).float()
        choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def get_config( self ) -> Any:
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
    def create_and_check_flaubert_model( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Optional[int]:
        '''simple docstring'''
        model = FlaubertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , lengths=input_lengths , langs=token_type_ids )
        result = model(input_ids , langs=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_flaubert_lm_head( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Union[str, Any]:
        '''simple docstring'''
        model = FlaubertWithLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_flaubert_simple_qa( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> str:
        '''simple docstring'''
        model = FlaubertForQuestionAnsweringSimple(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids )
        result = model(input_ids , start_positions=sequence_labels , end_positions=sequence_labels )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_flaubert_qa( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Any:
        '''simple docstring'''
        model = FlaubertForQuestionAnswering(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids )
        result_with_labels = model(
            input_ids , start_positions=sequence_labels , end_positions=sequence_labels , cls_index=sequence_labels , is_impossible=is_impossible_labels , p_mask=input_mask , )
        result_with_labels = model(
            input_ids , start_positions=sequence_labels , end_positions=sequence_labels , cls_index=sequence_labels , is_impossible=is_impossible_labels , )
        (total_loss ,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids , start_positions=sequence_labels , end_positions=sequence_labels )
        (total_loss ,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape , () )
        self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(
            result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(
            result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
    def create_and_check_flaubert_sequence_classif( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Dict:
        '''simple docstring'''
        model = FlaubertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids )
        result = model(input_ids , labels=sequence_labels )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def create_and_check_flaubert_token_classif( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Tuple:
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_flaubert_multiple_choice( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Optional[Any]:
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ) -> Any:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
snake_case : int = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class FlaubertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ) -> int:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ) -> Optional[int]:
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
return inputs_dict
    def setUp( self ) -> int:
        '''simple docstring'''
        self.model_tester = FlaubertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FlaubertConfig , emb_dim=37 )
    def test_config( self ) -> Any:
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_flaubert_model( self ) -> Dict:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs )
    def test_flaubert_lm_head( self ) -> Any:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs )
    def test_flaubert_simple_qa( self ) -> Optional[int]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs )
    def test_flaubert_qa( self ) -> List[Any]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs )
    def test_flaubert_sequence_classif( self ) -> Tuple:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs )
    def test_flaubert_token_classif( self ) -> Optional[Any]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs )
    def test_flaubert_multiple_choice( self ) -> Any:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ) -> List[Any]:
        '''simple docstring'''
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@slow
@require_torch_gpu
    def test_torchscript_device_change( self ) -> Optional[Any]:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , "traced_model.pt" ) )
                loaded = torch.jit.load(os.path.join(tmp , "traced_model.pt" ) , map_location=torch_device )
                loaded(inputs_dict["input_ids"].to(torch_device ) , inputs_dict["attention_mask"].to(torch_device ) )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
| 203
|
"""simple docstring"""
def min_path_sum( grid : list ) -> int:
    """simple docstring"""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information" )
    for cell_n in range(1 , len(grid[0] ) ):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1 , len(grid ) ):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row , row_above )
        row_above = grid[row_n]
    return grid[-1][-1]
def fill_row( current_row : list , row_above : list ) -> list:
    """simple docstring"""
    current_row[0] += row_above[0]
    for cell_n in range(1 , len(current_row ) ):
        current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
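    # Worked example (`min_path_sum` is a descriptive name chosen here; `fill_row`
    # is fixed by its call site): the cheapest right/down path through this grid
    # is 1 -> 3 -> 1 -> 1 -> 1, costing 7.
    assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7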
| 203
| 1
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=64 , embedding_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self ):
        return MegatronBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_megatron_bert_model(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MegatronBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_megatron_bert_for_masked_lm(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MegatronBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_megatron_bert_for_causal_lm(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MegatronBertForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_megatron_bert_for_next_sequence_prediction(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MegatronBertForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    def create_and_check_megatron_bert_for_pretraining(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MegatronBertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , next_sentence_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    def create_and_check_megatron_bert_for_question_answering(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MegatronBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_megatron_bert_for_sequence_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_megatron_bert_for_token_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_megatron_bert_for_multiple_choice(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
__a : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class MegatronBertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fp16 = True
    # test_resize_embeddings = False
    test_head_masking = False
    def _prepare_for_class(self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
return inputs_dict
    def setUp(self ):
        self.model_tester = MegatronBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MegatronBertConfig , hidden_size=37 )
    def test_config(self ):
        self.config_tester.run_common_tests()
    def test_megatron_bert_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs )
    def test_for_masked_lm(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs )
    def test_for_question_answering(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs )
def _long_tensor( tok_lst ):
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
lowercase__ =1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( unittest.TestCase ):
@slow
@unittest.skip('''Model is not available.''' )
def lowerCAmelCase (self : str ):
__a : List[str] = '''nvidia/megatron-bert-uncased-345m'''
if "MYDIR" in os.environ:
__a : Tuple = os.path.join(os.environ['''MYDIR'''] , snake_case_ )
__a : Any = MegatronBertModel.from_pretrained(snake_case_ )
model.to(snake_case_ )
model.half()
__a : Union[str, Any] = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
with torch.no_grad():
__a : List[str] = model(snake_case_ )[0]
__a : Optional[int] = torch.Size((1, 9, 1_0_2_4) )
self.assertEqual(output.shape , snake_case_ )
__a : Tuple = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
__a : Optional[int] = output[0, ii, jj]
__a : Any = expected[3 * ii + jj]
__a : Union[str, Any] = '''ii={} jj={} a={} b={}'''.format(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
self.assertTrue(math.isclose(snake_case_ , snake_case_ , rel_tol=snake_case_ , abs_tol=snake_case_ ) , msg=snake_case_ )
| 90
|
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowercase__ =os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
lowercase__ =os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class UpperCamelCase__ ( unittest.TestCase ):
def lowerCAmelCase (self : List[Any] ):
__a : str = get_test_to_tester_mapping(snake_case_ )
__a : Tuple = get_test_to_tester_mapping(snake_case_ )
__a : Union[str, Any] = {'''BertModelTest''': '''BertModelTester'''}
__a : Tuple = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
self.assertEqual(get_test_info.to_json(snake_case_ ) , snake_case_ )
self.assertEqual(get_test_info.to_json(snake_case_ ) , snake_case_ )
def lowerCAmelCase (self : str ):
__a : Optional[int] = get_model_to_test_mapping(snake_case_ )
__a : Any = get_model_to_test_mapping(snake_case_ )
__a : List[Any] = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
__a : Dict = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
self.assertEqual(get_test_info.to_json(snake_case_ ) , snake_case_ )
self.assertEqual(get_test_info.to_json(snake_case_ ) , snake_case_ )
def lowerCAmelCase (self : int ):
__a : Any = get_model_to_tester_mapping(snake_case_ )
__a : List[str] = get_model_to_tester_mapping(snake_case_ )
__a : Any = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
__a : int = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
self.assertEqual(get_test_info.to_json(snake_case_ ) , snake_case_ )
self.assertEqual(get_test_info.to_json(snake_case_ ) , snake_case_ )
| 90
| 1
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    sq: int = int(number**0.5)
    return number == sq * sq
def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    top: int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom: int = x_den * y_den * z_den
    hcf: int = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]
    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
    for num, den in unique_s:
        total += Fraction(num, den)
    return total.denominator + total.numerator
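# Sanity check for `add_three` above: 1/2 + 1/3 + 1/6 = 1, i.e. (1, 1) in lowest
# terms, since top = 18 + 12 + 6 = 36 and bottom = 2 * 3 * 6 = 36 share gcd 36.
assert add_three(1, 2, 1, 3, 1, 6) == (1, 1)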
if __name__ == "__main__":
print(f'''{solution() = }''')
| 48
|
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __snake_case ( unittest.TestCase ):
    def tearDown( self ) -> None:
'''simple docstring'''
super().tearDown()
gc.collect()
    def test_canny( self ) -> Dict:
        '''simple docstring'''
        controlnet , controlnet_params = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-canny' , from_pt=True , dtype=jnp.bfloat16 )
        pipe , params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , controlnet=controlnet , from_pt=True , dtype=jnp.bfloat16 )
        params['controlnet'] = controlnet_params
        prompts = 'bird'
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples )
        canny_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples )
        rng = jax.random.PRNGKey(0 )
        rng = jax.random.split(rng , jax.device_count() )
        p_params = replicate(params )
        prompt_ids = shard(prompt_ids )
        processed_image = shard(processed_image )
        images = pipe(
            prompt_ids=prompt_ids , image=processed_image , params=p_params , prng_seed=rng , num_inference_steps=50 , jit=True , ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [0.1_6_7_9_6_9, 0.1_1_6_6_9_9, 0.0_8_1_5_4_3, 0.1_5_4_2_9_7, 0.1_3_2_8_1_2, 0.1_0_8_8_8_7, 0.1_6_9_9_2_2, 0.1_6_9_9_2_2, 0.2_0_5_0_7_8] )
        print(F"""output_slice: {output_slice}""" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
    def test_pose( self ) -> Tuple:
        '''simple docstring'''
        controlnet , controlnet_params = FlaxControlNetModel.from_pretrained(
            'lllyasviel/sd-controlnet-openpose' , from_pt=True , dtype=jnp.bfloat16 )
        pipe , params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            'runwayml/stable-diffusion-v1-5' , controlnet=controlnet , from_pt=True , dtype=jnp.bfloat16 )
        params['controlnet'] = controlnet_params
        prompts = 'Chef in the kitchen'
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples )
        pose_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples )
        rng = jax.random.PRNGKey(0 )
        rng = jax.random.split(rng , jax.device_count() )
        p_params = replicate(params )
        prompt_ids = shard(prompt_ids )
        processed_image = shard(processed_image )
        images = pipe(
            prompt_ids=prompt_ids , image=processed_image , params=p_params , prng_seed=rng , num_inference_steps=50 , jit=True , ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [[0.2_7_1_4_8_4, 0.2_6_1_7_1_9, 0.2_7_5_3_9_1, 0.2_7_7_3_4_4, 0.2_7_9_2_9_7, 0.2_9_1_0_1_6, 0.2_9_4_9_2_2, 0.3_0_2_7_3_4, 0.3_0_2_7_3_4]] )
        print(F"""output_slice: {output_slice}""" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
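    # The replicate/shard calls above follow the standard pmap data layout: model
    # parameters are copied to every device, while inputs gain a leading device
    # axis of size jax.device_count() (the batch is split across devices) and the
    # PRNG key is split so each device samples independently.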
| 143
| 0
|
import numpy as np
def sigmoid(vector: np.array ) -> np.array:
    return 1 / (1 + np.exp(-vector ))
def sigmoid_linear_unit(vector: np.array ) -> np.array:
    return vector * sigmoid(1.702 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
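    # `sigmoid_linear_unit` (a descriptive name chosen here) is the sigmoid
    # approximation of GELU, gelu(x) ~= x * sigmoid(1.702 * x); the exact form is
    # x * Phi(x) with Phi the standard normal CDF.
    print(sigmoid_linear_unit(np.array([-1.0, 0.0, 1.0])))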
| 358
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand( ABC ):
    """simple docstring"""
    @staticmethod
    @abstractmethod
    def register_subcommand( parser: ArgumentParser ):
        raise NotImplementedError()
    @abstractmethod
    def run( self ):
        raise NotImplementedError()
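# A hypothetical concrete subclass showing the intended shape of the abstract
# base above: `register_subcommand` receives the argparse sub-parsers action and
# wires the command in, `run` performs the work. All names here are illustrative.
class EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(subparsers):
        echo_parser = subparsers.add_parser("echo")
        echo_parser.add_argument("text")
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.text))

    def __init__(self, text):
        self.text = text

    def run(self):
        print(self.text)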
| 292
| 0
|
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) -> None:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ) -> Any:
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ) -> Optional[Any]:
        """simple docstring"""
        model = TFBlenderbotSmallModel(config=config ).get_decoder()
        input_ids = inputs_dict['input_ids']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        head_mask = inputs_dict['head_mask']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output , past_key_values = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past , output_from_past , rtol=1e-3 )
def prepare_blenderbot_small_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> Tuple:
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
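# Illustration of the default-mask logic above: positions equal to
# `config.pad_token_id` receive 0, everything else 1. With pad_token_id=1 (the
# tester's default), tf.cast(tf.math.not_equal([[5, 7, 1]], 1), tf.int8)
# evaluates to [[1, 1, 0]]. The decoder mask additionally forces the first
# decoder position to 1 so decoding can start from the start token.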
@require_tf
class TFBlenderbotSmallModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
"conversational": TFBlenderbotSmallForConditionalGeneration,
"feature-extraction": TFBlenderbotSmallModel,
"summarization": TFBlenderbotSmallForConditionalGeneration,
"text2text-generation": TFBlenderbotSmallForConditionalGeneration,
"translation": TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp( self ) -> Dict:
        """simple docstring"""
        self.model_tester = TFBlenderbotSmallModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BlenderbotSmallConfig )
    def test_config( self ) -> List[str]:
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ) -> Union[str, Any]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_tokenizers
@require_tf
class A__ ( unittest.TestCase ):
    src_text = [
"Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
" i'm going to throw up.\nand why is that?"
]
    model_name = "facebook/blenderbot_small-90M"
    @cached_property
    def tokenizer( self ) -> Any:
        """simple docstring"""
        return BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
    @cached_property
    def model( self ) -> Optional[int]:
        """simple docstring"""
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
        return model
    @slow
    def test_90_generation_from_short_input( self ) -> Optional[int]:
        """simple docstring"""
        model_inputs = self.tokenizer(self.src_text , return_tensors='tf' )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}


class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module) -> bool:
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers (DDP, DataParallel, DeepSpeed, torch.compile)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached it first."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the data to disk, only on the main process (or via `xm.save` on TPU)."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Context manager that temporarily sets (upper-cased) environment variables."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Get a pretty name for a function, class or instance."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge the nested dictionary `source` into `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    """Check whether a port is in use on `localhost`."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
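# Usage sketch (added for illustration, assuming `MASTER_PORT` is not already set):
# `patch_environment` upper-cases keys, sets them for the duration of the block,
# and removes them again on exit.
if __name__ == "__main__":
    with patch_environment(master_port="29501"):
        assert os.environ["MASTER_PORT"] == "29501"
    assert "MASTER_PORT" not in os.environ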
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __A ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =[
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(lowerCamelCase__ ) )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Any =[
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(lowerCamelCase__ ) )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[str] =[
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(lowerCamelCase__ ) )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[str] =[
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
self.assertTrue(is_safetensors_compatible(lowerCamelCase__ ) )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Any =[
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(lowerCamelCase__ ) )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Tuple =[
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
__UpperCamelCase : List[Any] ='fp16'
self.assertTrue(is_safetensors_compatible(lowerCamelCase__ , variant=lowerCamelCase__ ) )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : str =[
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
__UpperCamelCase : str ='fp16'
self.assertTrue(is_safetensors_compatible(lowerCamelCase__ , variant=lowerCamelCase__ ) )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[int] =[
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
__UpperCamelCase : int ='fp16'
self.assertTrue(is_safetensors_compatible(lowerCamelCase__ , variant=lowerCamelCase__ ) )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Any =[
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
__UpperCamelCase : str ='fp16'
self.assertFalse(is_safetensors_compatible(lowerCamelCase__ , variant=lowerCamelCase__ ) )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[str] =[
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
__UpperCamelCase : Any ='fp16'
self.assertTrue(is_safetensors_compatible(lowerCamelCase__ , variant=lowerCamelCase__ ) )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[Any] =[
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
__UpperCamelCase : Tuple ='fp16'
self.assertTrue(is_safetensors_compatible(lowerCamelCase__ , variant=lowerCamelCase__ ) )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Any =[
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
# 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
__UpperCamelCase : Any ='fp16'
self.assertFalse(is_safetensors_compatible(lowerCamelCase__ , variant=lowerCamelCase__ ) )
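# Illustrative sketch of the rule the tests above encode (an assumption inferred from
# the test cases, not the actual diffusers implementation): a file list is
# safetensors-compatible when every `.bin` weight has a `.safetensors` counterpart
# with the same optional variant suffix; transformers-style components pair
# `pytorch_model.bin` with `model.safetensors`.
def _sketch_is_safetensors_compatible(filenames, variant=None):
    suffix = f".{variant}" if variant is not None else ""
    safetensors_files = {f for f in filenames if f.endswith(".safetensors")}
    for filename in filenames:
        if not filename.endswith(f"{suffix}.bin"):
            continue
        head = filename[: -len(f"{suffix}.bin")]
        folder, _, name = head.rpartition("/")
        if name == "pytorch_model":
            name = "model"
        candidate = f"{folder}/{name}{suffix}.safetensors" if folder else f"{name}{suffix}.safetensors"
        if candidate not in safetensors_files:
            return False
    return True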
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase_ = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase_ = {
'''roberta-base''': 5_12,
'''roberta-large''': 5_12,
'''roberta-large-mnli''': 5_12,
'''distilroberta-base''': 5_12,
'''roberta-base-openai-detector''': 5_12,
'''roberta-large-openai-detector''': 5_12,
}
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
snake_case = VOCAB_FILES_NAMES
snake_case = PRETRAINED_VOCAB_FILES_MAP
snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case = ['''input_ids''', '''attention_mask''']
snake_case = RobertaTokenizer
def __init__( self : Optional[Any] , __UpperCAmelCase : str=None , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : str=None , __UpperCAmelCase : Optional[Any]="replace" , __UpperCAmelCase : List[str]="<s>" , __UpperCAmelCase : Optional[int]="</s>" , __UpperCAmelCase : Optional[Any]="</s>" , __UpperCAmelCase : List[str]="<s>" , __UpperCAmelCase : List[Any]="<unk>" , __UpperCAmelCase : List[str]="<pad>" , __UpperCAmelCase : Optional[Any]="<mask>" , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : int=True , **__UpperCAmelCase : List[Any] , ):
'''simple docstring'''
super().__init__(
__UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , errors=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase , **__UpperCAmelCase , )
_A = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __UpperCAmelCase ) != add_prefix_space:
_A = getattr(__UpperCAmelCase , pre_tok_state.pop("type" ) )
_A = add_prefix_space
_A = pre_tok_class(**__UpperCAmelCase )
_A = add_prefix_space
_A = "post_processor"
_A = getattr(self.backend_tokenizer , __UpperCAmelCase , __UpperCAmelCase )
if tokenizer_component_instance:
_A = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_A = tuple(state["sep"] )
if "cls" in state:
_A = tuple(state["cls"] )
_A = False
if state.get("add_prefix_space" , __UpperCAmelCase ) != add_prefix_space:
_A = add_prefix_space
_A = True
if state.get("trim_offsets" , __UpperCAmelCase ) != trim_offsets:
_A = trim_offsets
_A = True
if changes_to_apply:
_A = getattr(__UpperCAmelCase , state.pop("type" ) )
_A = component_class(**__UpperCAmelCase )
setattr(self.backend_tokenizer , __UpperCAmelCase , __UpperCAmelCase )
@property
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Tuple ):
'''simple docstring'''
_A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else value
_A = value
def lowerCAmelCase ( self : Any , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Optional[Any] ):
'''simple docstring'''
_A = kwargs.get("is_split_into_words" , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def lowerCAmelCase ( self : List[str] , *__UpperCAmelCase : Tuple , **__UpperCAmelCase : List[str] ):
'''simple docstring'''
_A = kwargs.get("is_split_into_words" , __UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCAmelCase , **__UpperCAmelCase )
def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ):
'''simple docstring'''
_A = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase )
return tuple(__UpperCAmelCase )
def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[str]=None ):
'''simple docstring'''
_A = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ):
'''simple docstring'''
_A = [self.sep_token_id]
_A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
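# Usage sketch (illustrative; downloads from the Hugging Face Hub): the assertions in
# `_batch_encode_plus`/`_encode_plus` above require `add_prefix_space=True` whenever
# the input is already split into words.
if __name__ == "__main__":
    from transformers import RobertaTokenizerFast

    tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True)
    encoding = tokenizer(["Hello", "world"], is_split_into_words=True)
    print(encoding.input_ids)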
'''simple docstring'''
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    # Lists of the same size must get the same shuffling, so that entangled
    # lists (e.g. shards and their metadata) stay aligned.
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
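# Worked example (added for illustration): 10 shards over 3 jobs are distributed as
# contiguous groups of sizes 4, 3 and 3.
if __name__ == "__main__":
    assert _distribute_shards(num_shards=10, max_num_jobs=3) == [range(0, 4), range(4, 7), range(7, 10)]
    gen_kwargs = {"files": ["a.txt", "b.txt", "c.txt"], "mode": "rb"}
    assert _split_gen_kwargs(gen_kwargs, max_num_jobs=3) == [
        {"files": ["a.txt"], "mode": "rb"},
        {"files": ["b.txt"], "mode": "rb"},
        {"files": ["c.txt"], "mode": "rb"},
    ]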
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{solution() = }")
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class A ( __UpperCAmelCase ):
__snake_case = ['image_processor', 'tokenizer']
__snake_case = 'OwlViTImageProcessor'
__snake_case = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self, UpperCamelCase__=None, UpperCamelCase__=None, **UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''', UpperCamelCase__, )
lowerCAmelCase_ = kwargs.pop('''feature_extractor''' )
lowerCAmelCase_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(UpperCamelCase__, UpperCamelCase__ )
def __call__( self, UpperCamelCase__=None, UpperCamelCase__=None, UpperCamelCase__=None, UpperCamelCase__="max_length", UpperCamelCase__="np", **UpperCamelCase__ ):
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
'''You have to specify at least one text or query image or image. All three cannot be none.''' )
if text is not None:
if isinstance(UpperCamelCase__, UpperCamelCase__ ) or (isinstance(UpperCamelCase__, UpperCamelCase__ ) and not isinstance(text[0], UpperCamelCase__ )):
lowerCAmelCase_ = [self.tokenizer(UpperCamelCase__, padding=UpperCamelCase__, return_tensors=UpperCamelCase__, **UpperCamelCase__ )]
elif isinstance(UpperCamelCase__, UpperCamelCase__ ) and isinstance(text[0], UpperCamelCase__ ):
lowerCAmelCase_ = []
# Maximum number of queries across batch
lowerCAmelCase_ = max([len(UpperCamelCase__ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(UpperCamelCase__ ) != max_num_queries:
lowerCAmelCase_ = t + [''' '''] * (max_num_queries - len(UpperCamelCase__ ))
lowerCAmelCase_ = self.tokenizer(UpperCamelCase__, padding=UpperCamelCase__, return_tensors=UpperCamelCase__, **UpperCamelCase__ )
encodings.append(UpperCamelCase__ )
else:
raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' )
if return_tensors == "np":
lowerCAmelCase_ = np.concatenate([encoding['''input_ids'''] for encoding in encodings], axis=0 )
lowerCAmelCase_ = np.concatenate([encoding['''attention_mask'''] for encoding in encodings], axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
lowerCAmelCase_ = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings], axis=0 )
lowerCAmelCase_ = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings], axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
lowerCAmelCase_ = torch.cat([encoding['''input_ids'''] for encoding in encodings], dim=0 )
lowerCAmelCase_ = torch.cat([encoding['''attention_mask'''] for encoding in encodings], dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
lowerCAmelCase_ = tf.stack([encoding['''input_ids'''] for encoding in encodings], axis=0 )
lowerCAmelCase_ = tf.stack([encoding['''attention_mask'''] for encoding in encodings], axis=0 )
else:
raise ValueError('''Target return tensor type could not be returned''' )
lowerCAmelCase_ = BatchEncoding()
lowerCAmelCase_ = input_ids
lowerCAmelCase_ = attention_mask
if query_images is not None:
lowerCAmelCase_ = BatchEncoding()
lowerCAmelCase_ = self.image_processor(
UpperCamelCase__, return_tensors=UpperCamelCase__, **UpperCamelCase__ ).pixel_values
lowerCAmelCase_ = query_pixel_values
if images is not None:
lowerCAmelCase_ = self.image_processor(UpperCamelCase__, return_tensors=UpperCamelCase__, **UpperCamelCase__ )
if text is not None and images is not None:
lowerCAmelCase_ = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
lowerCAmelCase_ = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase__ ), tensor_type=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, *UpperCamelCase__, **UpperCamelCase__ ):
"""simple docstring"""
return self.image_processor.post_process(*UpperCamelCase__, **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, *UpperCamelCase__, **UpperCamelCase__ ):
"""simple docstring"""
return self.image_processor.post_process_object_detection(*UpperCamelCase__, **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, *UpperCamelCase__, **UpperCamelCase__ ):
"""simple docstring"""
return self.image_processor.post_process_image_guided_detection(*UpperCamelCase__, **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, *UpperCamelCase__, **UpperCamelCase__ ):
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCamelCase__, **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, *UpperCamelCase__, **UpperCamelCase__ ):
"""simple docstring"""
return self.tokenizer.decode(*UpperCamelCase__, **UpperCamelCase__ )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', UpperCamelCase__, )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', UpperCamelCase__, )
return self.image_processor
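# Usage sketch (illustrative; the checkpoint name is the public OWL-ViT base model
# and the blank image is a stand-in): per-image text queries are passed as a nested
# list and padded to the maximum number of queries in the batch.
if __name__ == "__main__":
    from PIL import Image
    from transformers import OwlViTProcessor

    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    image = Image.new("RGB", (768, 768))
    inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
    print(inputs.input_ids.shape, inputs.pixel_values.shape)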
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
__A = {
"linear": PIL.Image.Resampling.BILINEAR,
"bilinear": PIL.Image.Resampling.BILINEAR,
"bicubic": PIL.Image.Resampling.BICUBIC,
"lanczos": PIL.Image.Resampling.LANCZOS,
"nearest": PIL.Image.Resampling.NEAREST,
}
else:
__A = {
"linear": PIL.Image.LINEAR,
"bilinear": PIL.Image.BILINEAR,
"bicubic": PIL.Image.BICUBIC,
"lanczos": PIL.Image.LANCZOS,
"nearest": PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image batch in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
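# Usage sketch (added for illustration): `numpy_to_pil` expects float arrays in
# [0, 1] with shape (batch, height, width, channels).
if __name__ == "__main__":
    import numpy as np

    batch = np.random.rand(2, 64, 64, 3)
    pil_images = numpy_to_pil(batch)
    print(len(pil_images), pil_images[0].size)  # 2 (64, 64)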
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
__A = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] ) -> Tuple:
"""simple docstring"""
for attribute in key.split('.' ):
__lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ )
if weight_type is not None:
__lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape
else:
__lowerCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
__lowerCamelCase = value
elif weight_type == "weight_g":
__lowerCamelCase = value
elif weight_type == "weight_v":
__lowerCamelCase = value
elif weight_type == "bias":
__lowerCamelCase = value
else:
__lowerCamelCase = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowerCamelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowerCamelCase = []
__lowerCamelCase = fairseq_model.state_dict()
__lowerCamelCase = hf_model.feature_extractor
__lowerCamelCase = hf_model.adapter
for name, value in fairseq_dict.items():
__lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == 'group' , )
__lowerCamelCase = True
elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ):
load_adapter(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__lowerCamelCase = True
if "*" in mapped_key:
__lowerCamelCase = name.split(UpperCamelCase__ )[0].split('.' )[-2]
__lowerCamelCase = mapped_key.replace('*' , UpperCamelCase__ )
if "weight_g" in name:
__lowerCamelCase = 'weight_g'
elif "weight_v" in name:
__lowerCamelCase = 'weight_v'
elif "bias" in name:
__lowerCamelCase = 'bias'
elif "weight" in name:
__lowerCamelCase = 'weight'
else:
__lowerCamelCase = None
set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
continue
if not is_used:
unused_weights.append(UpperCamelCase__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple ) -> int:
"""simple docstring"""
__lowerCamelCase = full_name.split('conv_layers.' )[-1]
__lowerCamelCase = name.split('.' )
__lowerCamelCase = int(items[0] )
__lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
__lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
__lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
__lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
__lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCamelCase__ )
def lowerCamelCase_ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : int ) -> Union[str, Any]:
"""simple docstring"""
__lowerCamelCase = full_name.split('adaptor.' )[-1]
__lowerCamelCase = name.split('.' )
if items[1].isdigit():
__lowerCamelCase = int(items[1] )
else:
__lowerCamelCase = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."""
__lowerCamelCase = value
logger.info(F"""Adapter proj layer norm bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."""
__lowerCamelCase = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."""
__lowerCamelCase = value
logger.info(F"""Adapter proj layer bias was initialized from {full_name}.""" )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."""
__lowerCamelCase = value
logger.info(F"""Adapter proj layer weight was initialized from {full_name}.""" )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."""
__lowerCamelCase = value
logger.info(F"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F"""{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."""
__lowerCamelCase = value
logger.info(F"""Adapter layer {layer_id} bias was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCamelCase__ )
def lowerCamelCase_ ( UpperCamelCase__ : Tuple ) -> Tuple:
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase = emb.weight.shape
__lowerCamelCase = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
__lowerCamelCase = emb.weight.data
return lin_layer
@torch.no_grad()
def lowerCamelCase_ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] , ) -> str:
"""simple docstring"""
__lowerCamelCase = WavaVecaConfig.from_pretrained(
UpperCamelCase__ , add_adapter=UpperCamelCase__ , adapter_stride=UpperCamelCase__ , adapter_kernel_size=UpperCamelCase__ , use_auth_token=UpperCamelCase__ , output_hidden_size=UpperCamelCase__ , )
__lowerCamelCase = MBartConfig.from_pretrained(UpperCamelCase__ )
# load model
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
'config_yaml': config_yaml_path,
'data': '/'.join(dict_path.split('/' )[:-1] ),
'w2v_path': checkpoint_path,
'load_pretrained_decoder_from': None,
} , )
__lowerCamelCase = model[0].eval()
# load feature extractor
__lowerCamelCase = WavaVecaFeatureExtractor.from_pretrained(UpperCamelCase__ , use_auth_token=UpperCamelCase__ )
# set weights for wav2vec2 encoder
__lowerCamelCase = WavaVecaModel(UpperCamelCase__ )
recursively_load_weights_wavaveca(model.encoder , UpperCamelCase__ )
# load decoder weights
__lowerCamelCase = MBartForCausalLM(UpperCamelCase__ )
__lowerCamelCase , __lowerCamelCase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=UpperCamelCase__ )
logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
__lowerCamelCase = SpeechEncoderDecoderModel(encoder=UpperCamelCase__ , decoder=UpperCamelCase__ )
__lowerCamelCase = False
__lowerCamelCase = MBartaaTokenizer(UpperCamelCase__ )
tokenizer.save_pretrained(UpperCamelCase__ )
__lowerCamelCase = hf_wavavec.config.to_dict()
__lowerCamelCase = tokenizer.pad_token_id
__lowerCamelCase = tokenizer.bos_token_id
__lowerCamelCase = tokenizer.eos_token_id
__lowerCamelCase = 'mbart50'
__lowerCamelCase = 'wav2vec2'
__lowerCamelCase = tokenizer.eos_token_id
__lowerCamelCase = 25_0004
__lowerCamelCase = tokenizer.eos_token_id
__lowerCamelCase = SpeechEncoderDecoderConfig.from_dict(UpperCamelCase__ )
hf_wavavec.save_pretrained(UpperCamelCase__ )
feature_extractor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=10_24, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=25_00_04, type=int, help="`decoder_start_token_id` of model config")
__A = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
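# Example invocation (the script name and all paths are placeholders; the flags
# match the argparse definitions above):
#
#   python convert_wav2vec2_mbart_checkpoint.py \
#       --checkpoint_path /path/to/checkpoint.pt \
#       --dict_path /path/to/dict.txt \
#       --config_yaml_path /path/to/config.yaml \
#       --pytorch_dump_folder_path /path/to/output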
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. "s3://") from a dataset path, if any."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Whether `fs` points to a remote (non-local) filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    """Rename a path, moving locally instead of copying when possible."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear the reference to fsspec's loop and thread so they can be re-created."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
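# Usage sketch (added for illustration): `extract_path_from_uri` strips a remote
# protocol prefix and leaves local paths untouched.
if __name__ == "__main__":
    assert extract_path_from_uri("s3://my-bucket/datasets/train") == "my-bucket/datasets/train"
    assert extract_path_from_uri("relative/local/path") == "relative/local/path"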
"""simple docstring"""
encode_dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
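    # Round-trip example (added for illustration).
    assert encode("hello") == "AABBBAABAAABABAABABAABBAB"
    assert decode(encode("hello world")) == "hello world"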
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """Initialize with a list of the number of items in each set, rank 1 for each set."""
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets together using union by rank; return True if a merge happened."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the representative of a set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
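# Usage sketch (added for illustration): three sets with sizes 1, 1 and 2; merging
# all of them grows `max_set` to the total size.
if __name__ == "__main__":
    disjoint_set = DisjointSet([1, 1, 2])
    assert disjoint_set.max_set == 2
    assert disjoint_set.merge(0, 1) is True
    assert disjoint_set.get_parent(0) == disjoint_set.get_parent(1)
    disjoint_set.merge(1, 2)
    assert disjoint_set.max_set == 4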
"""simple docstring"""
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """Benchmark the two triangle generators against each other."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
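    # Quick check (added for illustration): the first three rows of Pascal's triangle.
    assert generate_pascal_triangle(3) == [[1], [1, 1], [1, 2, 1]]
    assert generate_pascal_triangle_optimized(3) == [[1], [1, 1], [1, 2, 1]]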
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )
    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
__A = logging.get_logger(__name__) # pylint: disable=invalid-name
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__(self : Any , UpperCAmelCase_ : CLIPSegForImageSegmentation , UpperCAmelCase_ : CLIPSegProcessor , UpperCAmelCase_ : AutoencoderKL , UpperCAmelCase_ : CLIPTextModel , UpperCAmelCase_ : CLIPTokenizer , UpperCAmelCase_ : UNetaDConditionModel , UpperCAmelCase_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCAmelCase_ : StableDiffusionSafetyChecker , UpperCAmelCase_ : CLIPImageProcessor , ) ->List[Any]:
'''simple docstring'''
super().__init__()
if hasattr(scheduler.config , "steps_offset") and scheduler.config.steps_offset != 1:
lowerCamelCase__: List[str] =(
F"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
F""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , UpperCAmelCase_ , standard_warn=UpperCAmelCase_)
lowerCamelCase__: Tuple =dict(scheduler.config)
lowerCamelCase__: List[str] =1
lowerCamelCase__: Optional[int] =FrozenDict(UpperCAmelCase_)
if hasattr(scheduler.config , "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
lowerCamelCase__: Tuple =(
F"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , UpperCAmelCase_ , standard_warn=UpperCAmelCase_)
lowerCamelCase__: Optional[int] =dict(scheduler.config)
lowerCamelCase__: str =True
lowerCamelCase__: Optional[Any] =FrozenDict(UpperCAmelCase_)
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .")
self.register_modules(
segmentation_model=UpperCAmelCase_ , segmentation_processor=UpperCAmelCase_ , vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : Optional[Union[str, int]] = "auto") ->List[Any]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase__: Dict =self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Any) ->Tuple:
'''simple docstring'''
self.enable_attention_slicing(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Dict) ->List[Any]:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`")
lowerCamelCase__: Dict =torch.device("cuda")
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(UpperCAmelCase_ , UpperCAmelCase_)
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def SCREAMING_SNAKE_CASE_ (self : Dict) ->str:
'''simple docstring'''
if self.device != torch.device("meta") or not hasattr(self.unet , "_hf_hook"):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCAmelCase_ , "_hf_hook")
and hasattr(module._hf_hook , "execution_device")
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
def __call__(self : int , UpperCAmelCase_ : Union[str, List[str]] , UpperCAmelCase_ : Union[torch.FloatTensor, PIL.Image.Image] , UpperCAmelCase_ : str , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : float = 7.5 , UpperCAmelCase_ : Optional[Union[str, List[str]]] = None , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : Optional[torch.Generator] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase_ : int = 1 , **UpperCAmelCase_ : str , ) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Dict =self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt").to(self.device)
lowerCamelCase__: Dict =self.segmentation_model(**UpperCAmelCase_)
lowerCamelCase__: Tuple =torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
lowerCamelCase__: List[Any] =self.numpy_to_pil(UpperCAmelCase_)[0].resize(image.size)
# Run inpainting pipeline with the generated mask
lowerCamelCase__: int =StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , mask_image=UpperCAmelCase_ , height=UpperCAmelCase_ , width=UpperCAmelCase_ , num_inference_steps=UpperCAmelCase_ , guidance_scale=UpperCAmelCase_ , negative_prompt=UpperCAmelCase_ , num_images_per_prompt=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , latents=UpperCAmelCase_ , output_type=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , callback=UpperCAmelCase_ , callback_steps=UpperCAmelCase_ , )
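# A minimal usage sketch of the __call__ above (the variable names and inputs
# are illustrative, not from this file): CLIPSeg turns `text` into a mask over
# `image`, which is then handed to StableDiffusionInpaintPipeline together
# with `prompt`:
#
#     result = pipe(prompt="a red sofa", image=init_image, text="the sofa")
#     result.images[0].save("out.png")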
| 10
|
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def __lowercase ( _A ) -> List[Tuple[int, ...]]:
SCREAMING_SNAKE_CASE : Optional[int] = []
if isinstance(_A , _A ):
for v in tree.values():
shapes.extend(_fetch_dims(_A ) )
elif isinstance(_A , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(_A ) )
elif isinstance(_A , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("""Not supported""" )
return shapes
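# e.g. _fetch_dims({"a": torch.zeros(2, 3), "b": [torch.zeros(4)]}) returns
# [torch.Size([2, 3]), torch.Size([4])].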
@torch.jit.ignore
def __lowercase ( _A , _A ) -> Tuple[int, ...]:
SCREAMING_SNAKE_CASE : List[Any] = []
for d in reversed(_A ):
idx.append(flat_idx % d )
SCREAMING_SNAKE_CASE : Tuple = flat_idx // d
return tuple(reversed(_A ) )
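# e.g. _flat_idx_to_idx(5, (2, 3)) == (1, 2), since 5 == 1 * 3 + 2.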
@torch.jit.ignore
def _get_minimal_slice_set(
    start,
    end,
    dims,
    start_edges=None,
    end_edges=None,
) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(start):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def __lowercase ( _A , _A , _A , _A ) -> torch.Tensor:
SCREAMING_SNAKE_CASE : Tuple = t.shape[:no_batch_dims]
SCREAMING_SNAKE_CASE : Union[str, Any] = list(_flat_idx_to_idx(_A , _A ) )
# _get_minimal_slice_set is inclusive
SCREAMING_SNAKE_CASE : Any = list(_flat_idx_to_idx(flat_end - 1 , _A ) )
# Get an ordered list of slices to perform
SCREAMING_SNAKE_CASE : List[Any] = _get_minimal_slice_set(
_A , _A , _A , )
SCREAMING_SNAKE_CASE : List[Any] = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
class a__:
    """simple docstring"""

    def __init__(self, max_chunk_size: int = 512) -> None:
        """simple docstring"""
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        """simple docstring"""
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        """simple docstring"""
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(
        self,
        representative_fn: Callable,
        args: tuple,
        min_chunk_size: int,
    ) -> int:
        """simple docstring"""
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Nothing is cached yet, so the chunk size has to be (re-)tuned
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
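# A minimal sketch of how `chunk_layer` is meant to be driven (toy layer and
# shapes, added for illustration; the relative import at the top means this
# module cannot be executed directly as a script):
#
#     def toy_layer(x: torch.Tensor) -> torch.Tensor:
#         return x * 2
#
#     inputs = {"x": torch.randn(4, 8, 16)}  # batch dims (4, 8) are flattened
#     out = chunk_layer(toy_layer, inputs, chunk_size=8, no_batch_dims=2)
#     assert out.shape == (4, 8, 16)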
| 245
| 0
|
'''simple docstring'''
import math
def __lowerCamelCase(initial_intensity: float, angle: float) -> float:
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="malus_law")
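# Worked example (added for illustration): at 60 degrees, cos^2 gives 1/4, so
# an initial intensity of 100.0 is attenuated to roughly 25.0:
#     __lowerCamelCase(100.0, 60.0)  # ~25.0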
| 367
|
'''simple docstring'''
def __lowerCamelCase(x_points: list, y_points: list, xa: int) -> list:
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
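# Worked example (added for illustration): the points (1, 6) .. (6, 11) lie on
# the line y = x + 5, so interpolating at x = 5 yields 10.0:
#     __lowerCamelCase([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0]  # -> 10.0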
| 3
| 0
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('0.9.0'):
raise Exception('requires fairseq >= 0.9.0')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'),
('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'),
('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'),
('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'),
]
def remove_ignore_keys_(state_dict):
    """simple docstring"""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, src, dest):
    """simple docstring"""
    val = dct.pop(src)
    dct[dest] = val
def load_xsum_checkpoint(checkpoint_path):
    """simple docstring"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface
def make_linear_from_emb(emb):
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """simple docstring"""
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum'
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
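# Example invocation of this script (the script name and paths are illustrative):
#     python convert_bart_checkpoint.py bart.large.cnn ./bart-large-cnn-dump \
#         --hf_config facebook/bart-large-cnn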
| 167
|
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_lowerCamelCase : Tuple = 'platform'
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
def a_ ( self : List[Any] ):
"""simple docstring"""
A_ : Optional[Any] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
A_ : Optional[int] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
A_ : Optional[Any] = shift_tokens_right(_lowerCamelCase , 1 , 2 )
A_ : Optional[Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_lowerCamelCase , )
A_ : Any = prepare_blenderbot_inputs_dict(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return config, inputs_dict
def a_ ( self : Optional[int] ):
"""simple docstring"""
A_ , A_ : str = self.prepare_config_and_inputs()
return config, inputs_dict
def a_ ( self : int , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : Dict ):
"""simple docstring"""
A_ : str = 20
A_ : Any = model_class_name(_lowerCamelCase )
A_ : List[Any] = model.encode(inputs_dict['''input_ids'''] )
A_ , A_ : int = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
A_ : int = model.init_cache(decoder_input_ids.shape[0] , _lowerCamelCase , _lowerCamelCase )
A_ : List[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
A_ : Dict = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
A_ : Optional[int] = model.decode(
decoder_input_ids[:, :-1] , _lowerCamelCase , decoder_attention_mask=_lowerCamelCase , past_key_values=_lowerCamelCase , decoder_position_ids=_lowerCamelCase , )
A_ : List[str] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
A_ : Tuple = model.decode(
decoder_input_ids[:, -1:] , _lowerCamelCase , decoder_attention_mask=_lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_lowerCamelCase , )
A_ : str = model.decode(_lowerCamelCase , _lowerCamelCase )
A_ : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def a_ ( self : List[Any] , _lowerCamelCase : Any , _lowerCamelCase : int , _lowerCamelCase : Optional[Any] ):
"""simple docstring"""
A_ : Union[str, Any] = 20
A_ : Dict = model_class_name(_lowerCamelCase )
A_ : Dict = model.encode(inputs_dict['''input_ids'''] )
A_ , A_ : Optional[int] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
A_ : Union[str, Any] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
A_ : Dict = model.init_cache(decoder_input_ids.shape[0] , _lowerCamelCase , _lowerCamelCase )
A_ : Union[str, Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
A_ : Optional[int] = model.decode(
decoder_input_ids[:, :-1] , _lowerCamelCase , decoder_attention_mask=_lowerCamelCase , past_key_values=_lowerCamelCase , decoder_position_ids=_lowerCamelCase , )
A_ : Optional[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
A_ : List[str] = model.decode(
decoder_input_ids[:, -1:] , _lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_lowerCamelCase , decoder_position_ids=_lowerCamelCase , )
A_ : Tuple = model.decode(_lowerCamelCase , _lowerCamelCase , decoder_attention_mask=_lowerCamelCase )
A_ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
def a_ ( self : str ):
"""simple docstring"""
A_ : List[str] = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
A_ : List[str] = input_ids.shape[0]
A_ : Optional[int] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def a_ ( self : List[str] ):
"""simple docstring"""
A_ , A_ , A_ : List[Any] = self._get_config_and_data()
A_ : Dict = FlaxBlenderbotSmallForConditionalGeneration(_lowerCamelCase )
A_ : Optional[int] = lm_model(input_ids=_lowerCamelCase )
A_ : Optional[Any] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _lowerCamelCase )
def a_ ( self : str ):
"""simple docstring"""
A_ : Tuple = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
A_ : Optional[int] = FlaxBlenderbotSmallForConditionalGeneration(_lowerCamelCase )
A_ : List[str] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
A_ : Optional[int] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
A_ : Dict = lm_model(input_ids=_lowerCamelCase , decoder_input_ids=_lowerCamelCase )
A_ : Any = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _lowerCamelCase )
def a_ ( self : Union[str, Any] ):
"""simple docstring"""
A_ : int = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
A_ : Tuple = shift_tokens_right(_lowerCamelCase , 1 , 2 )
A_ : Optional[int] = np.equal(_lowerCamelCase , 1 ).astype(np.floataa ).sum()
A_ : Tuple = np.equal(_lowerCamelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_lowerCamelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def a_ ( self : Tuple ):
"""simple docstring"""
A_ : Optional[int] = FlaxBlenderbotSmallModelTester(self )
def a_ ( self : List[str] ):
"""simple docstring"""
A_ , A_ : int = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def a_ ( self : Tuple ):
"""simple docstring"""
A_ , A_ : Tuple = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def a_ ( self : List[Any] ):
"""simple docstring"""
A_ , A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
A_ : str = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
A_ : Tuple = model_class(_lowerCamelCase )
@jax.jit
def encode_jitted(_lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any]=None , **_lowerCamelCase : List[str] ):
return model.encode(input_ids=_lowerCamelCase , attention_mask=_lowerCamelCase )
with self.subTest('''JIT Enabled''' ):
A_ : Optional[Any] = encode_jitted(**_lowerCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
A_ : List[Any] = encode_jitted(**_lowerCamelCase ).to_tuple()
self.assertEqual(len(_lowerCamelCase ) , len(_lowerCamelCase ) )
for jitted_output, output in zip(_lowerCamelCase , _lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def a_ ( self : Tuple ):
"""simple docstring"""
A_ , A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
A_ : Union[str, Any] = model_class(_lowerCamelCase )
A_ : Optional[Any] = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
A_ : Tuple = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(_lowerCamelCase : List[str] , _lowerCamelCase : int , _lowerCamelCase : Dict ):
return model.decode(
decoder_input_ids=_lowerCamelCase , decoder_attention_mask=_lowerCamelCase , encoder_outputs=_lowerCamelCase , )
with self.subTest('''JIT Enabled''' ):
A_ : Union[str, Any] = decode_jitted(**_lowerCamelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
A_ : Optional[Any] = decode_jitted(**_lowerCamelCase ).to_tuple()
self.assertEqual(len(_lowerCamelCase ) , len(_lowerCamelCase ) )
for jitted_output, output in zip(_lowerCamelCase , _lowerCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def a_ ( self : Tuple ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
A_ : str = model_class_name.from_pretrained('''facebook/blenderbot_small-90M''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
A_ : str = np.ones((1, 1) ) * model.config.eos_token_id
A_ : List[Any] = model(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
| 167
| 1
|
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class UpperCamelCase__( unittest.TestCase ):
def a__( self : Optional[int] )-> int:
"""simple docstring"""
UpperCAmelCase = ["a", "b", "c"]
# Defaults to last layer if both are None
UpperCAmelCase = get_aligned_output_features_output_indices(_a , _a , _a )
self.assertEqual(_a , ['''c'''] )
self.assertEqual(_a , [2] )
# Out indices set to match out features
UpperCAmelCase = get_aligned_output_features_output_indices(['''a''', '''c'''] , _a , _a )
self.assertEqual(_a , ['''a''', '''c'''] )
self.assertEqual(_a , [0, 2] )
# Out features set to match out indices
UpperCAmelCase = get_aligned_output_features_output_indices(_a , [0, 2] , _a )
self.assertEqual(_a , ['''a''', '''c'''] )
self.assertEqual(_a , [0, 2] )
# Out features selected from negative indices
UpperCAmelCase = get_aligned_output_features_output_indices(_a , [-3, -1] , _a )
self.assertEqual(_a , ['''a''', '''c'''] )
self.assertEqual(_a , [-3, -1] )
def a__( self : str )-> Tuple:
"""simple docstring"""
with self.assertRaises(_a ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , _a )
# Out features must be a list
with self.assertRaises(_a ):
verify_out_features_out_indices(('''a''', '''b''') , (0, 1) , ['''a''', '''b'''] )
# Out features must be a subset of stage names
with self.assertRaises(_a ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , ['''a'''] )
# Out indices must be a list or tuple
with self.assertRaises(_a ):
verify_out_features_out_indices(_a , 0 , ['''a''', '''b'''] )
# Out indices must be a subset of stage names
with self.assertRaises(_a ):
verify_out_features_out_indices(_a , (0, 1) , ['''a'''] )
# Out features and out indices must be the same length
with self.assertRaises(_a ):
verify_out_features_out_indices(['''a''', '''b'''] , (0,) , ['''a''', '''b''', '''c'''] )
# Out features should match out indices
with self.assertRaises(_a ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 2) , ['''a''', '''b''', '''c'''] )
# Out features and out indices should be in order
with self.assertRaises(_a ):
verify_out_features_out_indices(['''b''', '''a'''] , (0, 1) , ['''a''', '''b'''] )
# Check passes with valid inputs
verify_out_features_out_indices(['''a''', '''b''', '''d'''] , (0, 1, -1) , ['''a''', '''b''', '''c''', '''d'''] )
def a__( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = BackboneMixin()
UpperCAmelCase = ["a", "b", "c"]
UpperCAmelCase = ["a", "c"]
UpperCAmelCase = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
UpperCAmelCase = ["a", "b"]
self.assertEqual(backbone.out_features , ['''a''', '''b'''] )
self.assertEqual(backbone.out_indices , [0, 1] )
UpperCAmelCase = [-3, -1]
self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
self.assertEqual(backbone.out_indices , [-3, -1] )
| 370
|
'''simple docstring'''
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}
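# The sequence computed below is a(n + 1) = a(n) + digitsum(a(n)) with a(1) = 1
# (Project Euler problem 551); `memo` caches "jumps" (diff, dn, k) so that runs
# of terms sharing their low-order digits can be skipped in bulk.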
def next_term(a_i, k, i, n):
'''simple docstring'''
UpperCAmelCase = sum(a_i[j] for j in range(A , len(A ) ) )
UpperCAmelCase = sum(a_i[j] * base[j] for j in range(min(len(A ) , A ) ) )
UpperCAmelCase , UpperCAmelCase = 0, 0
UpperCAmelCase = n - i
UpperCAmelCase = memo.get(A )
if sub_memo is not None:
UpperCAmelCase = sub_memo.get(A )
if jumps is not None and len(A ) > 0:
# find and make the largest jump without going over
UpperCAmelCase = -1
for _k in range(len(A ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
UpperCAmelCase = _k
break
if max_jump >= 0:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = jumps[max_jump]
# since the difference between jumps is cached, add c
UpperCAmelCase = diff + c
for j in range(min(A , len(A ) ) ):
UpperCAmelCase , UpperCAmelCase = divmod(A , 10 )
if new_c > 0:
add(A , A , A )
else:
UpperCAmelCase = []
else:
UpperCAmelCase = {c: []}
UpperCAmelCase = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
UpperCAmelCase , UpperCAmelCase = next_term(A , k - 1 , i + dn , A )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
UpperCAmelCase , UpperCAmelCase = compute(A , A , i + dn , A )
diff += _diff
dn += terms_jumped
UpperCAmelCase = sub_memo[c]
# keep jumps sorted by # of terms skipped
UpperCAmelCase = 0
while j < len(A ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(A , (diff, dn, k) )
return (diff, dn)
def compute(a_i, k, i, n):
'''simple docstring'''
if i >= n:
return 0, i
if k > len(A ):
a_i.extend([0 for _ in range(k - len(A ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
UpperCAmelCase = i
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 0, 0, 0
for j in range(len(A ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
UpperCAmelCase = ds_c + ds_b
diff += addend
UpperCAmelCase = 0
for j in range(A ):
UpperCAmelCase = a_i[j] + addend
UpperCAmelCase , UpperCAmelCase = divmod(A , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(A , A , A )
return diff, i - start_i
def add(digits, k, addend):
'''simple docstring'''
for j in range(A , len(A ) ):
UpperCAmelCase = digits[j] + addend
if s >= 10:
UpperCAmelCase , UpperCAmelCase = divmod(A , 10 )
UpperCAmelCase = addend // 10 + quotient
else:
UpperCAmelCase = s
UpperCAmelCase = addend // 10
if addend == 0:
break
while addend > 0:
UpperCAmelCase , UpperCAmelCase = divmod(A , 10 )
digits.append(A )
def solution(n: int = 10**15):
'''simple docstring'''
UpperCAmelCase = [1]
UpperCAmelCase = 1
UpperCAmelCase = 0
while True:
UpperCAmelCase , UpperCAmelCase = next_term(A , 20 , i + dn , A )
dn += terms_jumped
if dn == n - i:
break
UpperCAmelCase = 0
for j in range(len(A ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(F"""{solution() = }""")
| 91
| 0
|
'''simple docstring'''
import datasets
SCREAMING_SNAKE_CASE__ = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
SCREAMING_SNAKE_CASE__ = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
SCREAMING_SNAKE_CASE__ = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'
def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> List[Any]:
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
    def _compute(self, predictions, references):
        """simple docstring"""
        return {"accuracy": simple_accuracy(predictions, references)}
| 321
|
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> List[str]:
UpperCamelCase = 1.5
UpperCamelCase = int(factor * num_class_images )
UpperCamelCase = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=__UpperCamelCase , aesthetic_weight=0.1 )
os.makedirs(F"{class_data_dir}/images" , exist_ok=__UpperCamelCase )
if len(list(Path(F"{class_data_dir}/images" ).iterdir() ) ) >= num_class_images:
return
while True:
UpperCamelCase = client.query(text=__UpperCamelCase )
if len(__UpperCamelCase ) >= factor * num_class_images or num_images > 1E4:
break
else:
UpperCamelCase = int(factor * num_images )
UpperCamelCase = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=__UpperCamelCase , aesthetic_weight=0.1 , )
UpperCamelCase = 0
UpperCamelCase = 0
UpperCamelCase = tqdm(desc="""downloading real regularization images""" , total=__UpperCamelCase )
with open(F"{class_data_dir}/caption.txt" , """w""" ) as fa, open(F"{class_data_dir}/urls.txt" , """w""" ) as fa, open(
F"{class_data_dir}/images.txt" , """w""" ) as fa:
while total < num_class_images:
UpperCamelCase = class_images[count]
count += 1
try:
UpperCamelCase = requests.get(images["""url"""] )
if img.status_code == 200:
UpperCamelCase = Image.open(BytesIO(img.content ) )
with open(F"{class_data_dir}/images/{total}.jpg" , """wb""" ) as f:
f.write(img.content )
fa.write(images["""caption"""] + """\n""" )
fa.write(images["""url"""] + """\n""" )
fa.write(F"{class_data_dir}/images/{total}.jpg" + """\n""" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def lowercase__ ( )-> str:
UpperCamelCase = argparse.ArgumentParser("""""" , add_help=__UpperCamelCase )
parser.add_argument("""--class_prompt""" , help="""text prompt to retrieve images""" , required=__UpperCamelCase , type=__UpperCamelCase )
parser.add_argument("""--class_data_dir""" , help="""path to save images""" , required=__UpperCamelCase , type=__UpperCamelCase )
parser.add_argument("""--num_class_images""" , help="""number of images to download""" , default=200 , type=__UpperCamelCase )
return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
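# Example invocation (the script name, prompt and path are illustrative):
#     python retrieve.py --class_prompt "photo of a dog" --class_data_dir ./class_data --num_class_images 200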
| 321
| 1
|
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCamelCase = '▁'
_lowerCamelCase = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class a ( _A , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Dict = BigBirdTokenizer
lowerCAmelCase : List[str] = BigBirdTokenizerFast
lowerCAmelCase : Union[str, Any] = True
lowerCAmelCase : Optional[int] = True
def lowerCamelCase_ ( self : List[str] ):
super().setUp()
UpperCAmelCase_ = self.tokenizer_class(__snake_case , keep_accents=__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase_ ( self : Dict ):
UpperCAmelCase_ = '''<s>'''
UpperCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case )
def lowerCamelCase_ ( self : Union[str, Any] ):
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''[MASK]''' )
self.assertEqual(len(__snake_case ) , 10_04 )
def lowerCamelCase_ ( self : Union[str, Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def lowerCamelCase_ ( self : Union[str, Any] ):
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase_ = tokenizer.tokenize(__snake_case )
UpperCAmelCase_ = rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
UpperCAmelCase_ = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
UpperCAmelCase_ = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = tokenizer.encode(__snake_case )
UpperCAmelCase_ = rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
def lowerCamelCase_ ( self : List[str] ):
UpperCAmelCase_ = BigBirdTokenizer(__snake_case , keep_accents=__snake_case )
UpperCAmelCase_ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) , [2_85, 46, 10, 1_70, 3_82] , )
UpperCAmelCase_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(__snake_case )
self.assertListEqual(
__snake_case , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(__snake_case )
self.assertListEqual(
__snake_case , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def lowerCamelCase_ ( self : List[str] ):
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
@slow
def lowerCamelCase_ ( self : Tuple ):
UpperCAmelCase_ = '''Hello World!'''
UpperCAmelCase_ = [65, 1_85_36, 22_60, 1_01, 66]
self.assertListEqual(__snake_case , self.big_tokenizer.encode(__snake_case ) )
@slow
def lowerCamelCase_ ( self : List[Any] ):
UpperCAmelCase_ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
UpperCAmelCase_ = [65, 8_71, 4_19, 3_58, 9_46, 9_91, 25_21, 4_52, 3_58, 13_57, 3_87, 77_51, 35_36, 1_12, 9_85, 4_56, 1_26, 8_65, 9_38, 54_00, 57_34, 4_58, 13_68, 4_67, 7_86, 24_62, 52_46, 11_59, 6_33, 8_65, 45_19, 4_57, 5_82, 8_52, 25_57, 4_27, 9_16, 5_08, 4_05, 3_43_24, 4_97, 3_91, 4_08, 1_13_42, 12_44, 3_85, 1_00, 9_38, 9_85, 4_56, 5_74, 3_62, 1_25_97, 32_00, 31_29, 11_72, 66] # noqa: E231
# fmt: on
self.assertListEqual(__snake_case , self.big_tokenizer.encode(__snake_case ) )
@require_torch
@slow
def lowerCamelCase_ ( self : Any ):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
UpperCAmelCase_ = list(self.big_tokenizer.get_vocab().keys() )[:10]
UpperCAmelCase_ = ''' '''.join(__snake_case )
UpperCAmelCase_ = self.big_tokenizer.encode_plus(__snake_case , return_tensors='''pt''' , return_token_type_ids=__snake_case )
UpperCAmelCase_ = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=__snake_case )
UpperCAmelCase_ = BigBirdConfig(attention_type='''original_full''' )
UpperCAmelCase_ = BigBirdModel(__snake_case )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__snake_case )
model(**__snake_case )
@slow
def lowerCamelCase_ ( self : Tuple ):
UpperCAmelCase_ = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''' )
UpperCAmelCase_ = tokenizer.decode(tokenizer('''Paris is the [MASK].''' ).input_ids )
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''' )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
# fmt: off
UpperCAmelCase_ = {'''input_ids''': [[65, 3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14, 66], [65, 4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
| 177
|
def SCREAMING_SNAKE_CASE(density: float, bulk_modulus: float) -> float:
if density <= 0:
raise ValueError('''Impossible fluid density''' )
if bulk_modulus <= 0:
raise ValueError('''Impossible bulk modulus''' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
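# Worked example (added for illustration): for water, a density of ~998 kg/m^3
# and a bulk modulus of ~2.15e9 Pa give sqrt(2.15e9 / 998) ~ 1467.8 m/s:
#     SCREAMING_SNAKE_CASE(998.0, 2.15e9)  # ~1467.8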
| 177
| 1
|
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'''vocab_file''': '''vocab.txt''',
'''merges_file''': '''bpe.codes''',
}
_lowerCAmelCase = {
'''vocab_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''',
},
'''merges_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''',
},
}
_lowerCAmelCase = {
'''vinai/phobert-base''': 256,
'''vinai/phobert-large''': 256,
}
def get_pairs(word):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
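# e.g. get_pairs("hello") -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}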
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__lowercase : Union[str, Any] = VOCAB_FILES_NAMES
__lowercase : int = PRETRAINED_VOCAB_FILES_MAP
__lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase="<s>" ,__UpperCAmelCase="</s>" ,__UpperCAmelCase="</s>" ,__UpperCAmelCase="<s>" ,__UpperCAmelCase="<unk>" ,__UpperCAmelCase="<pad>" ,__UpperCAmelCase="<mask>" ,**__UpperCAmelCase ,) -> List[str]:
super().__init__(
bos_token=__UpperCAmelCase ,eos_token=__UpperCAmelCase ,unk_token=__UpperCAmelCase ,sep_token=__UpperCAmelCase ,cls_token=__UpperCAmelCase ,pad_token=__UpperCAmelCase ,mask_token=__UpperCAmelCase ,**__UpperCAmelCase ,)
lowerCAmelCase__ : List[Any] = vocab_file
lowerCAmelCase__ : Dict = merges_file
lowerCAmelCase__ : int = {}
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : Optional[Any] = 1
lowerCAmelCase__ : Union[str, Any] = 2
lowerCAmelCase__ : Optional[int] = 3
self.add_from_file(__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = {v: k for k, v in self.encoder.items()}
with open(__UpperCAmelCase ,encoding="""utf-8""" ) as merges_handle:
lowerCAmelCase__ : int = merges_handle.read().split("""\n""" )[:-1]
lowerCAmelCase__ : Optional[int] = [tuple(merge.split()[:-1] ) for merge in merges]
lowerCAmelCase__ : Dict = dict(zip(__UpperCAmelCase ,range(len(__UpperCAmelCase ) ) ) )
lowerCAmelCase__ : Dict = {}
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase__ : Optional[int] = [self.cls_token_id]
lowerCAmelCase__ : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase ,token_ids_a=__UpperCAmelCase ,already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        # PhoBERT, like RoBERTa, does not use token type ids; the mask is all zeros.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # merge the highest-priority (lowest-rank) pair first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file
    def add_from_file(self, f):
        """Load a fastBPE dictionary of '<token> <count>' lines and add its symbols to the vocabulary."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
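

# Usage sketch (illustrative, not part of the original module). The file names are
# hypothetical placeholders; any fastBPE-style vocab/merges pair in the formats
# parsed above would work, e.g. the files shipped with vinai/phobert-base.
if __name__ == "__main__":
    tokenizer = PhobertTokenizer("vocab.txt", "bpe.codes")
    tokens = tokenizer._tokenize("Xin chào thế giới")  # internal method, shown for clarity
    ids = [tokenizer._convert_token_to_id(t) for t in tokens]
    print(tokens)
    print(tokenizer.build_inputs_with_special_tokens(ids))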
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''',
# See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    """Configuration class to store the configuration of a BART model."""

    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096,
        encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1,
        attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False,
        use_cache=True, num_labels=3, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True,
        decoder_start_token_id=2, forced_eos_token_id=2, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            num_labels=num_labels, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id, **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
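

# Illustrative sketch (added, not in the original file): building a deliberately tiny
# config and round-tripping it through serialization; all values here are arbitrary
# and the path is a hypothetical placeholder.
#
#     config = BartConfig(vocab_size=1000, d_model=64, encoder_layers=2, decoder_layers=2)
#     config.save_pretrained("/tmp/tiny-bart")
#     assert BartConfig.from_pretrained("/tmp/tiny-bart").d_model == 64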
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
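

# Illustrative sketch (added for clarity): how the export machinery typically drives
# this config. "facebook/bart-large" is the public checkpoint referenced above; the
# rest follows the methods defined in BartOnnxConfig.
#
#     from transformers import BartTokenizer
#     config = BartConfig.from_pretrained("facebook/bart-large")
#     onnx_config = BartOnnxConfig(config, task="seq2seq-lm")
#     tokenizer = BartTokenizer.from_pretrained("facebook/bart-large")
#     dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#     print(sorted(dummy))  # input_ids, attention_mask, decoder_input_ids, ...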
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]

        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]

        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
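
# Usage note (added): this module backs the `accelerate config` CLI entry point, e.g.
#
#     $ accelerate config --config_file ~/.cache/huggingface/accelerate/default_config.yaml
#
# The answers to the prompts are saved as YAML (or JSON, if the path ends in .json)
# and are later picked up automatically by `accelerate launch`.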
from math import factorial


class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)
    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        # absent higher-order coefficients are zero (see `reduce`), so pad with 0
        if len(s_dual) > len(o_dual):
            o_dual.extend([0] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([0] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__
    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__
    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x
def differentiate(func, position, order):
    """Evaluate the `order`-th derivative of `func` at `position` using dual numbers."""
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
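

# Worked example (illustrative addition, relying on the zero-padding fix in __add__
# above): for g(x) = x**3 + 2*x at x = 2, g'(2) = 3*4 + 2 = 14 and g''(2) = 12;
# `differentiate` reads these off as duals[order - 1] * order!.
if __name__ == "__main__":
    def g(y):
        return y**3 + y * 2

    print(differentiate(g, 2, 1))  # 14
    print(differentiate(g, 2, 2))  # 12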
""" PyTorch - Flax general utilities."""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple

import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict

import transformers

from .utils import logging


logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False):
    """Load pytorch checkpoints in a flax model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix):
    """Rename PyTorch weight names to corresponding Flax weight names and reshape the tensor if necessary."""

    def is_key_or_prefix_key_in_dict(key) -> bool:
        """Checks whether `key` or `(model_prefix,) + key` is in random_flax_state_dict."""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
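

# Illustrative example (added): for a PyTorch Linear parameter with
# pt_tuple_key = ("dense", "weight") and an (out_features, in_features) matrix,
# no matching "scale"/"embedding" candidate exists in the random Flax params, so the
# helper falls through to the linear-layer branch and returns ("dense", "kernel")
# together with the transposed (in_features, out_features) matrix -- Flax's kernel
# convention.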
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue

            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]

            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue

                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load flax checkpoints in a PyTorch model"""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split(".")
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + "_g"
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + "_v"
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = ".".join(key_components)
                special_pt_names[key_to_check] = key

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")

    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )

    return pt_model
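

# Usage sketch (illustrative, with hypothetical paths): the typical entry points are
#
#     flax_state = load_pytorch_checkpoint_in_flax_state_dict(flax_model, "pytorch_model.bin", is_sharded=False)
#     pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "flax_model.msgpack")
#
# Both require torch *and* flax to be installed, as the error paths above enforce.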
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(
        self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10, num_channels=3,
        min_size=32 * 4, max_size=32 * 6, num_labels=4, mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_maskformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)


TOLERANCE = 1e-4
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )
    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )
    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
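

# Illustrative inference sketch (not part of the test suite); the checkpoint is the
# same public one the integration tests above use.
#
#     model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
#     processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
#     inputs = processor(prepare_img(), return_tensors="pt")
#     with torch.no_grad():
#         outputs = model(**inputs)
#     semantic_map = processor.post_process_semantic_segmentation(outputs)[0]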
from copy import deepcopy
from typing import Optional, Union

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available


if is_torch_available():
    import torch

if is_tf_available():
    import tensorflow as tf


class SamProcessor(ProcessorMixin):
    """Constructs a SAM processor which wraps a SAM image processor and handles point/box prompt preparation."""

    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]
def __call__( self : List[Any] , lowercase__ : Optional[int]=None , lowercase__ : Any=None , lowercase__ : Tuple=None , lowercase__ : List[str]=None , lowercase__ : Optional[Union[str, TensorType]] = None , **lowercase__ : Dict , ):
'''simple docstring'''
lowerCAmelCase__ = self.image_processor(
lowercase__ , return_tensors=lowercase__ , **lowercase__ , )
        # pop arguments that are not used in the forward pass but are needed nevertheless
        original_sizes = encoding_image_processor['original_sizes']
        if hasattr(original_sizes , 'numpy'): # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._check_and_preprocess_points(
input_points=lowercase__ , input_labels=lowercase__ , input_boxes=lowercase__ , )
lowerCAmelCase__ = self._normalize_and_convert(
lowercase__ , lowercase__ , input_points=lowercase__ , input_labels=lowercase__ , input_boxes=lowercase__ , return_tensors=lowercase__ , )
return encoding_image_processor
def __snake_case ( self : Optional[Any] , lowercase__ : List[str] , lowercase__ : List[str] , lowercase__ : str=None , lowercase__ : Optional[int]=None , lowercase__ : str=None , lowercase__ : Optional[Any]="pt" , ):
'''simple docstring'''
if input_points is not None:
            if len(input_points) != len(original_sizes):
lowerCAmelCase__ = [
self._normalize_coordinates(self.target_size , lowercase__ , original_sizes[0]) for point in input_points
]
else:
lowerCAmelCase__ = [
self._normalize_coordinates(self.target_size , lowercase__ , lowercase__)
for point, original_size in zip(lowercase__ , lowercase__)
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points):
if input_labels is not None:
lowerCAmelCase__ , lowerCAmelCase__ = self._pad_points_and_labels(lowercase__ , lowercase__)
lowerCAmelCase__ = np.array(lowercase__)
if input_labels is not None:
lowerCAmelCase__ = np.array(lowercase__)
if input_boxes is not None:
            if len(input_boxes) != len(original_sizes):
lowerCAmelCase__ = [
self._normalize_coordinates(self.target_size , lowercase__ , original_sizes[0] , is_bounding_box=lowercase__)
for box in input_boxes
]
else:
lowerCAmelCase__ = [
self._normalize_coordinates(self.target_size , lowercase__ , lowercase__ , is_bounding_box=lowercase__)
for box, original_size in zip(lowercase__ , lowercase__)
]
lowerCAmelCase__ = np.array(lowercase__)
if input_boxes is not None:
if return_tensors == "pt":
lowerCAmelCase__ = torch.from_numpy(lowercase__)
# boxes batch size of 1 by default
lowerCAmelCase__ = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
elif return_tensors == "tf":
lowerCAmelCase__ = tf.convert_to_tensor(lowercase__)
# boxes batch size of 1 by default
lowerCAmelCase__ = tf.expand_dims(lowercase__ , 1) if len(input_boxes.shape) != 3 else input_boxes
encoding_image_processor.update({'input_boxes': input_boxes})
if input_points is not None:
if return_tensors == "pt":
lowerCAmelCase__ = torch.from_numpy(lowercase__)
# point batch size of 1 by default
lowerCAmelCase__ = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
elif return_tensors == "tf":
lowerCAmelCase__ = tf.convert_to_tensor(lowercase__)
# point batch size of 1 by default
lowerCAmelCase__ = tf.expand_dims(lowercase__ , 1) if len(input_points.shape) != 4 else input_points
encoding_image_processor.update({'input_points': input_points})
if input_labels is not None:
if return_tensors == "pt":
lowerCAmelCase__ = torch.from_numpy(lowercase__)
# point batch size of 1 by default
lowerCAmelCase__ = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
elif return_tensors == "tf":
lowerCAmelCase__ = tf.convert_to_tensor(lowercase__)
# point batch size of 1 by default
lowerCAmelCase__ = tf.expand_dims(lowercase__ , 1) if len(input_labels.shape) != 3 else input_labels
encoding_image_processor.update({'input_labels': input_labels})
return encoding_image_processor
def __snake_case ( self : str , lowercase__ : Optional[int] , lowercase__ : Optional[Any]):
'''simple docstring'''
lowerCAmelCase__ = max([point.shape[0] for point in input_points])
lowerCAmelCase__ = []
for i, point in enumerate(lowercase__):
if point.shape[0] != expected_nb_points:
lowerCAmelCase__ = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value] , axis=0)
lowerCAmelCase__ = np.append(input_labels[i] , [self.point_pad_value])
processed_input_points.append(lowercase__)
lowerCAmelCase__ = processed_input_points
return input_points, input_labels
def __snake_case ( self : Optional[Any] , lowercase__ : int , lowercase__ : np.ndarray , lowercase__ : int , lowercase__ : Optional[Any]=False):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ = original_size
lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor._get_preprocess_shape(lowercase__ , longest_edge=lowercase__)
lowerCAmelCase__ = deepcopy(lowercase__).astype(lowercase__)
if is_bounding_box:
lowerCAmelCase__ = coords.reshape(-1 , 2 , 2)
lowerCAmelCase__ = coords[..., 0] * (new_w / old_w)
lowerCAmelCase__ = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
lowerCAmelCase__ = coords.reshape(-1 , 4)
return coords
def __snake_case ( self : Dict , lowercase__ : Optional[Any]=None , lowercase__ : Tuple=None , lowercase__ : int=None , ):
'''simple docstring'''
if input_points is not None:
if hasattr(lowercase__ , 'numpy'): # Checks for TF or Torch tensor
lowerCAmelCase__ = input_points.numpy().tolist()
if not isinstance(lowercase__ , lowercase__) or not isinstance(input_points[0] , lowercase__):
raise ValueError('Input points must be a list of list of floating points.')
lowerCAmelCase__ = [np.array(lowercase__) for input_point in input_points]
else:
lowerCAmelCase__ = None
if input_labels is not None:
if hasattr(lowercase__ , 'numpy'):
lowerCAmelCase__ = input_labels.numpy().tolist()
if not isinstance(lowercase__ , lowercase__) or not isinstance(input_labels[0] , lowercase__):
                raise ValueError('Input labels must be a list of list of integers.')
lowerCAmelCase__ = [np.array(lowercase__) for label in input_labels]
else:
lowerCAmelCase__ = None
if input_boxes is not None:
if hasattr(lowercase__ , 'numpy'):
lowerCAmelCase__ = input_boxes.numpy().tolist()
if (
not isinstance(lowercase__ , lowercase__)
or not isinstance(input_boxes[0] , lowercase__)
or not isinstance(input_boxes[0][0] , lowercase__)
):
raise ValueError('Input boxes must be a list of list of list of floating points.')
lowerCAmelCase__ = [np.array(lowercase__).astype(np.floataa) for box in input_boxes]
else:
lowerCAmelCase__ = None
return input_points, input_labels, input_boxes
@property
def __snake_case ( self : List[Any]):
'''simple docstring'''
lowerCAmelCase__ = self.image_processor.model_input_names
return list(dict.fromkeys(lowercase__))
def __snake_case ( self : int , *lowercase__ : int , **lowercase__ : int):
'''simple docstring'''
return self.image_processor.post_process_masks(*lowercase__ , **lowercase__)
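# Minimal end-to-end sketch of the processor above. Assumes the public
# `facebook/sam-vit-base` checkpoint and the standard `SamModel` class from
# `transformers`; treat the URL and names as illustrative rather than part of
# the original file.
if __name__ == "__main__":
    import requests
    from PIL import Image
    from transformers import SamModel, SamProcessor

    processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
    model = SamModel.from_pretrained("facebook/sam-vit-base")
    url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    # One (x, y) foreground point: nesting is (image, nb_points, 2).
    inputs = processor(raw_image, input_points=[[[450, 600]]], return_tensors="pt")
    outputs = model(**inputs)
    # Rescale the low-resolution predicted masks back to the original image size.
    masks = processor.post_process_masks(
        outputs.pred_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"]
    )
    print(masks[0].shape)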
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser(
    description=(
        """Extracts some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer Learning"""
        """ Distillation"""
    )
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
__lowercase = parser.parse_args()
if args.model_type == "bert":
__lowercase = BertForMaskedLM.from_pretrained(args.model_name)
__lowercase = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
__lowercase = model.state_dict()
__lowercase = {}
for w in ["word_embeddings", "position_embeddings"]:
__lowercase = state_dict[f'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
__lowercase = state_dict[f'''{prefix}.embeddings.LayerNorm.{w}''']
__lowercase = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
__lowercase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
__lowercase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
__lowercase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
__lowercase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
__lowercase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
__lowercase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
__lowercase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
__lowercase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
__lowercase = state_dict["""cls.predictions.decoder.weight"""]
__lowercase = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
__lowercase = state_dict[f'''cls.predictions.transform.dense.{w}''']
__lowercase = state_dict[f'''cls.predictions.transform.LayerNorm.{w}''']
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
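# Example invocation of the extraction script above (a sketch; the script
# filename and output path are placeholders):
#
#   python extract.py \
#       --model_type bert \
#       --model_name bert-base-uncased \
#       --dump_checkpoint serialization_dir/bert-base-uncased_extracted.pth \
#       --vocab_transform
#
# The saved state dict seeds a 6-layer student (teacher layers 0, 2, 4, 7, 9
# and 11, per the loop above) before distillation training.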
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
UpperCAmelCase_ : Dict = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
UpperCAmelCase_ : List[str] = {
"""gpt2""": 1024,
"""gpt2-medium""": 1024,
"""gpt2-large""": 1024,
"""gpt2-xl""": 1024,
"""distilgpt2""": 1024,
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ["input_ids", "attention_mask"]
__UpperCamelCase = GPTaTokenizer
def __init__( self : Optional[int] , lowercase_ : int=None , lowercase_ : List[str]=None , lowercase_ : Union[str, Any]=None , lowercase_ : Tuple="<|endoftext|>" , lowercase_ : str="<|endoftext|>" , lowercase_ : Dict="<|endoftext|>" , lowercase_ : Tuple=False , **lowercase_ : Optional[int] , ):
'''simple docstring'''
super().__init__(
lowercase_ , lowercase_ , tokenizer_file=lowercase_ , unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , add_prefix_space=lowercase_ , **lowercase_ , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = kwargs.pop('''add_bos_token''' , lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('''add_prefix_space''' , lowercase_) != add_prefix_space:
SCREAMING_SNAKE_CASE_ : int = getattr(lowercase_ , pre_tok_state.pop('''type'''))
SCREAMING_SNAKE_CASE_ : str = add_prefix_space
SCREAMING_SNAKE_CASE_ : Dict = pre_tok_class(**lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = add_prefix_space
def _SCREAMING_SNAKE_CASE ( self : str , *lowercase_ : List[Any] , **lowercase_ : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = kwargs.get('''is_split_into_words''' , lowercase_)
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowercase_ , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , *lowercase_ : List[str] , **lowercase_ : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = kwargs.get('''is_split_into_words''' , lowercase_)
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowercase_ , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : str , lowercase_ : Optional[str] = None):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = self._tokenizer.model.save(lowercase_ , name=lowercase_)
return tuple(lowercase_)
def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : "Conversation"):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowercase_ , add_special_tokens=lowercase_) + [self.eos_token_id])
if len(lowercase_) > self.model_max_length:
SCREAMING_SNAKE_CASE_ : Any = input_ids[-self.model_max_length :]
return input_ids
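# A short usage sketch for the fast tokenizer above (uses the public `gpt2`
# checkpoint; illustration only, not part of the original file):
if __name__ == "__main__":
    from transformers import GPT2TokenizerFast

    tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
    # `is_split_into_words=True` requires `add_prefix_space=True`; otherwise the
    # assertion in `_batch_encode_plus` above fires.
    enc = tok(["Hello", "world", "!"], is_split_into_words=True)
    print(enc["input_ids"])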
"""simple docstring"""
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False)
parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not')
parser.add_argument('--steps', default=None, type=int, help='Num inference steps')
args = parser.parse_args()

device = 'cpu'
prompt = 'a lovely <dicoo> in a red dress and hat, in the snowy and bright night, with many brightly lit buildings'
model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'generator': generator}
if args.steps is not None:
    generate_kwargs['num_inference_steps'] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save('generated.png')
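# Example invocation of the script above (a sketch; the filename is a
# placeholder, and `path-to-your-trained-model` must point at a real checkpoint
# directory before running):
#
#   python stable_diffusion_ipex.py --dpm --steps 20
#
# `--dpm` swaps in the DPMSolver multistep scheduler; `--steps` overrides the
# scheduler's default number of inference steps.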
"""simple docstring"""
import argparse
import os
import re
import packaging.version
_UpperCamelCase : Optional[Any] = 'examples/'
_UpperCamelCase : Any = {
'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
_UpperCamelCase : List[str] = {
'init': 'src/diffusers/__init__.py',
'setup': 'setup.py',
}
_UpperCamelCase : List[str] = 'README.md'
def snake_case (A_ :str , A_ :Optional[Any] , A_ :Any ):
'''simple docstring'''
with open(A_ , 'r' , encoding='utf-8' , newline='\n' ) as f:
a : Tuple = f.read()
a, a : Any = REPLACE_PATTERNS[pattern]
a : Dict = replace.replace('VERSION' , A_ )
a : Union[str, Any] = re_pattern.sub(A_ , A_ )
with open(A_ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(A_ )
def snake_case (A_ :List[Any] ):
'''simple docstring'''
for folder, directories, fnames in os.walk(A_ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('research_projects' )
if "legacy" in directories:
directories.remove('legacy' )
for fname in fnames:
if fname.endswith('.py' ):
update_version_in_file(os.path.join(A_ , A_ ) , A_ , pattern='examples' )
def snake_case (A_ :Tuple , A_ :Optional[Any]=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(A_ , A_ , A_ )
if not patch:
update_version_in_examples(A_ )
def snake_case ():
'''simple docstring'''
a : str = '🤗 Transformers currently provides the following architectures'
a : Dict = '1. Want to contribute a new model?'
with open(A_ , 'r' , encoding='utf-8' , newline='\n' ) as f:
a : Optional[Any] = f.readlines()
# Find the start of the list.
a : List[str] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
a : Optional[Any] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('1.' ):
a : int = lines[index].replace(
'https://huggingface.co/docs/diffusers/main/model_doc' , 'https://huggingface.co/docs/diffusers/model_doc' , )
index += 1
with open(A_ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(A_ )
def snake_case ():
'''simple docstring'''
with open(REPLACE_FILES['init'] , 'r' ) as f:
a : List[str] = f.read()
a : str = REPLACE_PATTERNS['init'][0].search(A_ ).groups()[0]
return packaging.version.parse(A_ )
def snake_case (A_ :Optional[Any]=False ):
'''simple docstring'''
a : Optional[int] = get_version()
if patch and default_version.is_devrelease:
raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
if default_version.is_devrelease:
a : Tuple = default_version.base_version
elif patch:
a : Union[str, Any] = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
a : Optional[Any] = f'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
a : Union[str, Any] = input(f'''Which version are you releasing? [{default_version}]''' )
if len(A_ ) == 0:
a : int = default_version
print(f'''Updating version to {version}.''' )
global_version_update(A_ , patch=A_ )
def snake_case ():
'''simple docstring'''
a : str = get_version()
a : Optional[int] = f'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
a : Optional[int] = current_version.base_version
# Check with the user we got that right.
a : str = input(f'''Which version are we developing now? [{dev_version}]''' )
if len(A_ ) == 0:
a : Union[str, Any] = dev_version
print(f'''Updating version to {version}.''' )
global_version_update(A_ )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
_UpperCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
_UpperCamelCase : Optional[Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
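# Typical invocations of the release helper above (a sketch; the script path is
# an assumption, not stated in the file):
#
#   python utils/release.py                  # bump to the next minor version
#   python utils/release.py --patch          # cut a patch release
#   python utils/release.py --post_release   # move back to a .dev0 dev version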
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = StableDiffusionControlNetImgaImgPipeline
_UpperCAmelCase :Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
_UpperCAmelCase :Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_UpperCAmelCase :Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
_UpperCAmelCase :Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _snake_case ( self ):
torch.manual_seed(0 )
lowercase__: List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
lowercase__: Dict = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
lowercase__: Tuple = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , )
torch.manual_seed(0 )
lowercase__: Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase__: Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase__: Tuple = CLIPTextModel(_UpperCAmelCase )
lowercase__: List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase__: Tuple = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
if str(_UpperCAmelCase ).startswith('''mps''' ):
lowercase__: List[Any] = torch.manual_seed(_UpperCAmelCase )
else:
lowercase__: List[Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
lowercase__: Tuple = 2
lowercase__: Any = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_UpperCAmelCase , device=torch.device(_UpperCAmelCase ) , )
lowercase__: Tuple = floats_tensor(control_image.shape , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
lowercase__: Any = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase__: Tuple = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
lowercase__: List[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def _snake_case ( self ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def _snake_case ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def _snake_case ( self ):
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = StableDiffusionControlNetImgaImgPipeline
_UpperCAmelCase :Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
_UpperCAmelCase :Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_UpperCAmelCase :Tuple = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def _snake_case ( self ):
torch.manual_seed(0 )
lowercase__: Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0 )
        def init_weights(m):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal_(m.weight )
                m.bias.data.fill_(1.0 )
lowercase__: Any = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlneta.controlnet_down_blocks.apply(init_weights )
torch.manual_seed(0 )
lowercase__: List[str] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlneta.controlnet_down_blocks.apply(init_weights )
torch.manual_seed(0 )
lowercase__: Any = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , )
torch.manual_seed(0 )
lowercase__: Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase__: Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase__: str = CLIPTextModel(_UpperCAmelCase )
lowercase__: str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase__: str = MultiControlNetModel([controlneta, controlneta] )
lowercase__: Union[str, Any] = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
if str(_UpperCAmelCase ).startswith('''mps''' ):
lowercase__: Union[str, Any] = torch.manual_seed(_UpperCAmelCase )
else:
lowercase__: Union[str, Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
lowercase__: List[Any] = 2
lowercase__: Tuple = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_UpperCAmelCase , device=torch.device(_UpperCAmelCase ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_UpperCAmelCase , device=torch.device(_UpperCAmelCase ) , ),
]
lowercase__: int = floats_tensor(control_image[0].shape , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
lowercase__: List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase__: Union[str, Any] = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert('''RGB''' ).resize((64, 64) )
lowercase__: int = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def _snake_case ( self ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device )
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2 ) ) > 1e-3
        assert np.sum(np.abs(output_1 - output_3 ) ) > 1e-3
        assert np.sum(np.abs(output_1 - output_4 ) ) > 1e-3
def _snake_case ( self ):
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def _snake_case ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def _snake_case ( self ):
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
def _snake_case ( self ):
lowercase__: Optional[int] = self.get_dummy_components()
lowercase__: Optional[Any] = self.pipeline_class(**_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ):
lowercase__: List[str] = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' )
lowercase__: List[Any] = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , safety_checker=_UpperCAmelCase , controlnet=_UpperCAmelCase )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowercase__: str = '''evil space-punk bird'''
lowercase__: List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((512, 512) )
lowercase__: str = load_image(
'''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((512, 512) )
lowercase__: Any = pipe(
_UpperCAmelCase , _UpperCAmelCase , control_image=_UpperCAmelCase , generator=_UpperCAmelCase , output_type='''np''' , num_inference_steps=50 , strength=0.6 , )
lowercase__: int = output.images[0]
assert image.shape == (512, 512, 3)
lowercase__: int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' )
assert np.abs(expected_image - image ).max() < 9e-2
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
__A = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
def __init__( self , **_UpperCAmelCase ):
super().__init__(**_UpperCAmelCase )
requires_backends(self , '''vision''' )
requires_backends(self , '''torch''' )
if self.framework != "pt":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
self.check_model_type(_UpperCAmelCase )
def _snake_case ( self , **_UpperCAmelCase ):
lowercase__: List[Any] = {}
lowercase__: List[Any] = {}
lowercase__: Dict = {}
# preprocess args
if "points_per_batch" in kwargs:
lowercase__: Dict = kwargs['''points_per_batch''']
if "points_per_crop" in kwargs:
lowercase__: Any = kwargs['''points_per_crop''']
if "crops_n_layers" in kwargs:
lowercase__: Union[str, Any] = kwargs['''crops_n_layers''']
if "crop_overlap_ratio" in kwargs:
lowercase__: Optional[Any] = kwargs['''crop_overlap_ratio''']
if "crop_n_points_downscale_factor" in kwargs:
lowercase__: Union[str, Any] = kwargs['''crop_n_points_downscale_factor''']
# postprocess args
if "pred_iou_thresh" in kwargs:
lowercase__: Any = kwargs['''pred_iou_thresh''']
if "stability_score_offset" in kwargs:
lowercase__: Tuple = kwargs['''stability_score_offset''']
if "mask_threshold" in kwargs:
lowercase__: List[str] = kwargs['''mask_threshold''']
if "stability_score_thresh" in kwargs:
lowercase__: str = kwargs['''stability_score_thresh''']
if "crops_nms_thresh" in kwargs:
lowercase__: List[str] = kwargs['''crops_nms_thresh''']
if "output_rle_mask" in kwargs:
lowercase__: Dict = kwargs['''output_rle_mask''']
if "output_bboxes_mask" in kwargs:
lowercase__: int = kwargs['''output_bboxes_mask''']
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self , _UpperCAmelCase , *_UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ):
return super().__call__(_UpperCAmelCase , *_UpperCAmelCase , num_workers=_UpperCAmelCase , batch_size=_UpperCAmelCase , **_UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=64 , _UpperCAmelCase = 0 , _UpperCAmelCase = 512 / 1500 , _UpperCAmelCase = 32 , _UpperCAmelCase = 1 , ):
lowercase__: Union[str, Any] = load_image(_UpperCAmelCase )
lowercase__: Dict = self.image_processor.size['''longest_edge''']
lowercase__, lowercase__, lowercase__, lowercase__: Optional[Any] = self.image_processor.generate_crop_boxes(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowercase__: List[Any] = self.image_processor(images=_UpperCAmelCase , return_tensors='''pt''' )
with self.device_placement():
if self.framework == "pt":
lowercase__: Tuple = self.get_inference_context()
with inference_context():
lowercase__: Optional[Any] = self._ensure_tensor_on_device(_UpperCAmelCase , device=self.device )
lowercase__: Any = self.model.get_image_embeddings(model_inputs.pop('''pixel_values''' ) )
lowercase__: Tuple = image_embeddings
lowercase__: Optional[Any] = grid_points.shape[1]
lowercase__: Tuple = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
                '''Cannot have points_per_batch<=0. Must be >=1 to return batched outputs. '''
'''To return all points at once, set points_per_batch to None''' )
for i in range(0 , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Dict = grid_points[:, i : i + points_per_batch, :, :]
lowercase__: int = input_labels[:, i : i + points_per_batch]
lowercase__: Any = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=0.88 , _UpperCAmelCase=0.95 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , ):
lowercase__: List[Any] = model_inputs.pop('''input_boxes''' )
lowercase__: List[Any] = model_inputs.pop('''is_last''' )
lowercase__: Any = model_inputs.pop('''original_sizes''' ).tolist()
lowercase__: Union[str, Any] = model_inputs.pop('''reshaped_input_sizes''' ).tolist()
lowercase__: List[Any] = self.model(**_UpperCAmelCase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
lowercase__: int = model_outputs['''pred_masks''']
lowercase__: str = self.image_processor.post_process_masks(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , binarize=_UpperCAmelCase )
lowercase__: str = model_outputs['''iou_scores''']
lowercase__, lowercase__, lowercase__: Optional[int] = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=0.7 , ):
lowercase__: int = []
lowercase__: str = []
lowercase__: List[Any] = []
for model_output in model_outputs:
all_scores.append(model_output.pop('''iou_scores''' ) )
all_masks.extend(model_output.pop('''masks''' ) )
all_boxes.append(model_output.pop('''boxes''' ) )
lowercase__: Any = torch.cat(_UpperCAmelCase )
lowercase__: Dict = torch.cat(_UpperCAmelCase )
lowercase__, lowercase__, lowercase__, lowercase__: Any = self.image_processor.post_process_for_mask_generation(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
lowercase__: Union[str, Any] = defaultdict(_UpperCAmelCase )
for output in model_outputs:
for k, v in output.items():
extra[k].append(_UpperCAmelCase )
lowercase__: Any = {}
if output_rle_mask:
lowercase__: Optional[Any] = rle_mask
if output_bboxes_mask:
lowercase__: Optional[int] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
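# Usage sketch for the chunked pipeline above through the high-level `pipeline`
# factory (assumes the public `facebook/sam-vit-base` checkpoint; illustration
# only, not part of the original module):
if __name__ == "__main__":
    from transformers import pipeline

    generator = pipeline("mask-generation", model="facebook/sam-vit-base")
    outputs = generator(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        points_per_batch=64,
    )
    # `masks` holds one binary mask per detected region, `scores` the predicted IoU.
    print(len(outputs["masks"]))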
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/test_sentencepiece.model")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
_SCREAMING_SNAKE_CASE = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = CamembertTokenizer
snake_case_ = CamembertTokenizerFast
snake_case_ = True
snake_case_ = True
def lowerCAmelCase ( self : Union[str, Any] )-> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
snake_case = CamembertTokenizer(__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : Tuple )-> List[Any]:
snake_case = """<pad>"""
snake_case = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case )
def lowerCAmelCase ( self : Dict )-> Optional[Any]:
snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(__snake_case ) , 10_04 )
def lowerCAmelCase ( self : List[str] )-> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 10_05 )
def lowerCAmelCase ( self : List[str] )-> List[str]:
snake_case = CamembertTokenizer(__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
snake_case = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
snake_case = """I was born in 92000, and this is falsé."""
snake_case = tokenizer.encode(__snake_case )
snake_case = rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
snake_case = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
snake_case = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
snake_case = tokenizer.convert_ids_to_tokens(__snake_case )
snake_case = rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
def lowerCAmelCase ( self : str )-> Any:
if not self.test_rust_tokenizer:
return
snake_case = self.get_tokenizer()
snake_case = self.get_rust_tokenizer()
snake_case = """I was born in 92000, and this is falsé."""
snake_case = tokenizer.tokenize(__snake_case )
snake_case = rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
snake_case = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
snake_case = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
snake_case = self.get_rust_tokenizer()
snake_case = tokenizer.encode(__snake_case )
snake_case = rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
@slow
def lowerCAmelCase ( self : Any )-> Optional[int]:
# fmt: off
snake_case = {"""input_ids""": [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
snake_case = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=__snake_case , )
"""simple docstring"""
import string
def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence )


def benchmark() -> None:
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(F"> atbash_slow(): {timeit('atbash_slow(printable)' , setup=setup)} seconds")
    print(F"> atbash(): {timeit('atbash(printable)' , setup=setup)} seconds")


if __name__ == "__main__":
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(F"""{example} encrypted in atbash: {atbash(example)}""")
    benchmark()
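# Sanity check (a sketch): Atbash is an involution, so applying either
# implementation twice round-trips the input.
#
#   assert atbash(atbash("The quick brown fox!")) == "The quick brown fox!"
#   assert atbash_slow(atbash_slow("The quick brown fox!")) == "The quick brown fox!"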
"""simple docstring"""
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    url = F"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url , headers=header ).text )
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ] )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span" , attrs={"class": "a-offscreen"} ).text
            try:
                product_rating = item.find("span" , attrs={"class": "a-icon-alt"} ).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹" ).replace("," , "" ) )
                            - float(product_price.strip("₹" ).replace("," , "" ) )
                        )
                        / float(product_mrp.strip("₹" ).replace("," , "" ) )
                    )
                    * 100 )
            except ValueError:
                discount = float("nan" )
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index )] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"],
            "MRP of the product",
        ] = " "
        data_frame.loc[
            data_frame["MRP of the product"] == "",
            "Discount",
        ] = " "
        data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
|
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
snake_case_ = ['''text''', '''image''', '''audio''']
def snake_case__ ( SCREAMING_SNAKE_CASE_ : List[str] ):
'''simple docstring'''
lowercase__ : Union[str, Any] = []
for input_type in input_types:
if input_type == "text":
inputs.append('Text input' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3_000 ) )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
inputs.append(create_inputs(SCREAMING_SNAKE_CASE_ ) )
else:
raise ValueError(f"""Invalid type requested: {input_type}""" )
return inputs
def snake_case__ ( SCREAMING_SNAKE_CASE_ : List ):
'''simple docstring'''
lowercase__ : str = []
for output in outputs:
if isinstance(SCREAMING_SNAKE_CASE_ , (str, AgentText) ):
output_types.append('text' )
elif isinstance(SCREAMING_SNAKE_CASE_ , (Image.Image, AgentImage) ):
output_types.append('image' )
elif isinstance(SCREAMING_SNAKE_CASE_ , (torch.Tensor, AgentAudio) ):
output_types.append('audio' )
else:
raise ValueError(f"""Invalid output: {output}""" )
return output_types
@is_tool_test
class SCREAMING_SNAKE_CASE__ :
def snake_case_ ( self):
self.assertTrue(hasattr(self.tool , 'inputs'))
self.assertTrue(hasattr(self.tool , 'outputs'))
lowercase__ : List[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input , a):
for __input in _input:
self.assertTrue(__input in authorized_types)
else:
self.assertTrue(_input in authorized_types)
lowercase__ : Dict = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types)
def snake_case_ ( self):
lowercase__ : Optional[int] = create_inputs(self.tool.inputs)
lowercase__ : Optional[int] = self.tool(*a)
# There is a single output
if len(self.tool.outputs) == 1:
lowercase__ : Optional[int] = [outputs]
self.assertListEqual(output_types(a) , self.tool.outputs)
def snake_case_ ( self):
self.assertTrue(hasattr(self.tool , 'description'))
self.assertTrue(hasattr(self.tool , 'default_checkpoint'))
self.assertTrue(self.tool.description.startswith('This is a tool that'))
def snake_case_ ( self):
lowercase__ : str = create_inputs(self.tool.inputs)
lowercase__ : List[str] = self.tool(*a)
if not isinstance(a , a):
lowercase__ : List[str] = [outputs]
self.assertEqual(len(a) , len(self.tool.outputs))
for output, output_type in zip(a , self.tool.outputs):
lowercase__ : Union[str, Any] = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(a , a))
def snake_case_ ( self):
lowercase__ : Union[str, Any] = create_inputs(self.tool.inputs)
lowercase__ : Dict = []
for _input, input_type in zip(a , self.tool.inputs):
if isinstance(a , a):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
# Should not raise an error
lowercase__ : List[Any] = self.tool(*a)
if not isinstance(a , a):
lowercase__ : Optional[Any] = [outputs]
self.assertEqual(len(a) , len(self.tool.outputs))
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
snake_case_ = False
snake_case_ = True
snake_case_ = False
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
parser.add_argument(
'''--repo_path''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
snake_case_ = parser.parse_args()
snake_case_ = {
'''image_size''': '''sample_size''',
'''num_res_blocks''': '''layers_per_block''',
'''block_channels''': '''block_out_channels''',
'''down_blocks''': '''down_block_types''',
'''up_blocks''': '''up_block_types''',
'''downscale_freq_shift''': '''freq_shift''',
'''resnet_num_groups''': '''norm_num_groups''',
'''resnet_act_fn''': '''act_fn''',
'''resnet_eps''': '''norm_eps''',
'''num_head_channels''': '''attention_head_dim''',
}
snake_case_ = {
'''time_steps''': '''time_proj''',
'''mid''': '''mid_block''',
'''downsample_blocks''': '''down_blocks''',
'''upsample_blocks''': '''up_blocks''',
}
snake_case_ = '''''' if has_file(args.repo_path, '''config.json''') else '''unet'''
with open(os.path.join(args.repo_path, subfolder, '''config.json'''), '''r''', encoding='''utf-8''') as reader:
snake_case_ = reader.read()
snake_case_ = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, '''config.json'''):
snake_case_ = UNetaDModel(**config)
else:
snake_case_ = UNetaDConditionModel if '''ldm-text2im-large-256''' in args.repo_path else UNetaDModel
snake_case_ = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
snake_case_ = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
snake_case_ = config[key]
del config[key]
snake_case_ = [k.replace('''UNetRes''', '''''') for k in config['''down_block_types''']]
snake_case_ = [k.replace('''UNetRes''', '''''') for k in config['''up_block_types''']]
if do_only_weights:
snake_case_ = torch.load(os.path.join(args.repo_path, subfolder, '''diffusion_pytorch_model.bin'''))
snake_case_ = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('''.op.bias''') or param_key.endswith('''.op.weight'''):
continue
snake_case_ = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('''.''')[0] == key:
snake_case_ = param_value
snake_case_ = True
if not has_changed:
snake_case_ = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCAmelCase ( lowerCamelCase__ ):
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_snake_case , """embed_dim""" ) )
self.parent.assertTrue(hasattr(_snake_case , """num_heads""" ) )
class TFCvtModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, embed_dim=[16, 48, 96], num_heads=[1, 3, 6], depth=[1, 2, 10], patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], stride_kv=[2, 2, 2], cls_token=[False, False, True], attention_drop_rate=[0.0, 0.0, 0.0], initializer_range=0.02, layer_norm_eps=1e-12, is_training=True, use_labels=True, num_labels=2, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        """simple docstring"""
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))
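    # Worked example of the floor formula above (added for illustration, using the
    # tester defaults): stage 1 (patch 7, stride 4, pad 2) maps 64 -> floor((64 + 4 - 7) / 4 + 1) = 16,
    # stage 2 (patch 3, stride 2, pad 1) maps 16 -> 8, and stage 3 maps 8 -> 4,
    # so the expected last_hidden_state shape is (batch_size, 96, 4, 4).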
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        """simple docstring"""
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        """simple docstring"""
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        """simple docstring"""
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        """simple docstring"""
        pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
    def test_dataset_conversion(self):
        """simple docstring"""
        super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
    def test_keras_fit(self):
        """simple docstring"""
        super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
    def test_keras_fit_mixed_precision(self):
        """simple docstring"""
        policy = tf.keras.mixed_precision.Policy("mixed_float16")
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy("float32")
    def test_forward_signature(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        """simple docstring"""

        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]), [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        """simple docstring"""
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        """simple docstring"""
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 82
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"""Unsupported alpha_transform_type: {alpha_transform_type}""")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
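# Minimal usage sketch (added comment, not in the original file): for a 4-step
# cosine schedule the loop above evaluates
#
#     betas = betas_for_alpha_bar(4)
#     # beta_i = min(1 - alpha_bar((i + 1) / 4) / alpha_bar(i / 4), 0.999)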
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(self, num_train_timesteps=1000, beta_start=0.00085, beta_end=0.012, beta_schedule="linear", trained_betas=None, prediction_type="epsilon", timestep_spacing="linspace", steps_offset=0, ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"""{beta_schedule} is not implemented for {self.__class__}""")
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample, timestep, ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None, ):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""")
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]])
        if str(device).startswith('mps'):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)
        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()
        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])
        self.sample = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()
        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]
        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1
        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t
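    # Illustrative note (added comment, not in the original file): with ascending
    # log_sigmas = [0.0, 1.0, 2.0] and log(sigma) = 1.5, low_idx = 1 and high_idx = 2,
    # w = (1.0 - 1.5) / (1.0 - 2.0) = 0.5, so t = 0.5 * 1 + 0.5 * 2 = 1.5: the
    # continuous timestep lands halfway between the two tabulated entries.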
    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(self, model_output, timestep, sample, return_dict=True, ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError('prediction_type not implemented yet: sample')
        else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""")
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat
            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            sample = self.sample
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps, ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)
        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)
        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
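# Minimal usage sketch (added, not part of the original file; `unet` is a hypothetical
# denoiser callable and `sample` a float32 latent tensor):
#
#     scheduler = KDPM2DiscreteScheduler()
#     scheduler.set_timesteps(25)
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         noise_pred = unet(model_input, t)
#         sample = scheduler.step(noise_pred, t, sample).prev_sample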
| 119
| 0
|
"""simple docstring"""
__author__ = '''Tobias Carryer'''
from time import time
class LinearCongruentialGenerator:
    '''simple docstring'''

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        """simple docstring"""
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """simple docstring"""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
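# Worked example of the recurrence above (added for illustration): with
# multiplier=5, increment=3, modulo=16 and seed=7, successive calls return
# (5 * 7 + 3) % 16 = 6, then (5 * 6 + 3) % 16 = 1, then (5 * 1 + 3) % 16 = 8, ...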
if __name__ == "__main__":
# Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
while True:
print(lcg.next_number())
| 369
|
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"""H\003"""
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 168
| 0
|
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__A = os.path.join(git_repo_path, "src", "transformers")
__A = "\n{0} = None\n"
__A = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n"
__A = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
class lowerCamelCase__ ( unittest.TestCase ):
    def test_find_backend(self):
        """simple docstring"""
        no_backend = find_backend("    _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")")
        self.assertIsNone(no_backend)
        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")
        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")
        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):")
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):")
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")
    def test_read_init(self):
        """simple docstring"""
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])
    def test_create_dummy_object(self):
        """simple docstring"""
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")
        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n")
        expected_dummy_class = '''
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
'''
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        """simple docstring"""
        expected_dummy_pytorch_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
'''
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 148
|
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    """simple docstring"""

    def _get_uniform_logits(self, batch_size, length):
        '''simple docstring'''
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        '''simple docstring'''
        input_ids = None
        length = 20
        scores = self._get_uniform_logits(batch_size=2, length=length)
        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch
        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)
        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)
        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)
        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))
        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())
        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())
    def test_top_k_dist_warper(self):
        '''simple docstring'''
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size
        top_k_warp = FlaxTopKLogitsWarper(3)
        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)
        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])
        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)
        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)
        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])
    def test_top_p_dist_warper(self):
        '''simple docstring'''
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))
        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))
        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )
        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0
        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)
        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])
    def test_min_length_dist_processor(self):
        '''simple docstring'''
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float('''inf''')])
        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())
    def test_forced_bos_token_logits_processor(self):
        '''simple docstring'''
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0
        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero
        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_forced_eos_token_logits_processor(self):
        '''simple docstring'''
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5
        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero
        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_processor_list(self):
        '''simple docstring'''
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        cur_len = 10
        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)
        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
    def test_processor_list_jitted(self):
        '''simple docstring'''
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)
        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)
        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
| 186
| 0
|
"""simple docstring"""
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
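# Illustrative usage (added, not in the original file):
#
#     value, weight = [60, 100, 120], [10, 20, 30]
#     max_value, fractions = fractional_knapsack(value, weight, capacity=50)
#     # ratios are [6.0, 5.0, 4.0]; items 0 and 1 are taken whole plus 2/3 of item 2,
#     # so max_value == 60 + 100 + 120 * (20 / 30) == 240.0 and fractions == [1, 1, 2/3]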
if __name__ == "__main__":
import doctest
doctest.testmod()
| 230
|
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class DualTransformeraDModel(nn.Module):
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, ):
        '''simple docstring'''
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, )
                for _ in range(2)
            ])
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True, ):
        '''simple docstring'''
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return TransformeraDModelOutput(sample=output_states)
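# Usage note (added comment, not part of the original file): with the default
# condition_lengths = [77, 257], `encoder_hidden_states` of shape
# (batch, 77 + 257, num_features) is split column-wise; tokens [0:77] are routed to
# transformers[1] and tokens [77:334] to transformers[0], and the two residuals are
# blended with mix_ratio before the input states are added back.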
| 230
| 1
|
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 3
|
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
BLIP_TEST_FILE = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        """simple docstring"""
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)
        expected_bert_mapping = {'''BertModelTest''': '''BertModelTester'''}
        expected_blip_mapping = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), expected_blip_mapping)
    def test_get_model_to_test_mapping(self):
        """simple docstring"""
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)
        expected_bert_mapping = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
        expected_blip_mapping = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), expected_blip_mapping)
    def test_get_model_to_tester_mapping(self):
        """simple docstring"""
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)
        expected_bert_mapping = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
        expected_blip_mapping = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), expected_blip_mapping)
| 3
| 1
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 339
|
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
    def __init__(self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, is_training=True, use_attention_mask=True, use_labels=False, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return TaConfig.from_pretrained('''google/umt5-base''')
    def prepare_inputs_dict(self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
return TaConfig(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def get_config(self):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
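
    # d_kv is derived as hidden_size // num_attention_heads so the concatenated
    # per-head outputs project back to d_model, the standard multi-head layout.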

    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create a hypothetical next token and extend next_input_ids with it
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select a random slice of the hidden dimension
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that the cached and uncached outputs agree on the slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
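        # The assertion above exercises the KV-cache contract used for incremental
        # decoding: feeding only the newest token together with `past_key_values`
        # must reproduce the hidden state obtained by re-running the full,
        # extended sequence without a cache, e.g. (sketch, not part of the test):
        #   out = model(prompt_ids, use_cache=True)
        #   step = model(next_token, past_key_values=out.past_key_values)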

    def create_and_check_model_fp16_forward(
        self,
        config,
        input_dict,
    ):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
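        # Overflow in float16 typically shows up as NaN activations, so a NaN-free
        # last_hidden_state is a cheap smoke test that the model survives `.half()`.
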
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    # Flag names below are assumed from the upstream UMT5 test suite; only the
    # truth values are preserved in the source.
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            # NOTE: the indices below follow the original T5 test; the test is
            # skipped, so the 2-tuple returned by prepare_config_and_inputs is
            # never actually indexed past its length.
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)

    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required by the T5 model when head_mask is specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
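
        # A fully zeroed head mask prunes those attention heads at run time, so the
        # summed attention weights for the masked module must come out exactly 0.0.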

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):  # method name assumed; only the skip reason survives in the source
        pass


@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained"
        " here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909"
        " to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
        # fmt: on
        # torch.testing.assert_allclose is deprecated in recent torch releases;
        # torch.testing.assert_close is the drop-in replacement.
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)