| column | type | range |
|---|---|---|
| code | string | lengths 82–53.2k |
| code_codestyle | int64 | 0–721 |
| style_context | string | lengths 91–41.9k |
| style_context_codestyle | int64 | 0–699 |
| label | int64 | 0–1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizer(PreTrainedTokenizer):
    """SentencePiece-based XLNet tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            # Re-split pieces like "2," so the digits and the trailing comma become separate tokens
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings of sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
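
A short usage sketch for the tokenizer above, assuming a local XLNet-compatible SentencePiece file at "spiece.model" (the path and sentence are illustrative, not from the source):

# Sketch: exercising the special-token helpers defined above.
tokenizer = XLNetTokenizer("spiece.model")
ids = tokenizer.convert_tokens_to_ids(tokenizer._tokenize("Hello world"))
pair_ids = tokenizer.build_inputs_with_special_tokens(ids)         # ids + <sep> + <cls>
segment_ids = tokenizer.create_token_type_ids_from_sequences(ids)  # 0s for ids + <sep>, then 2 for <cls>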
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
_UpperCamelCase = random.Random()
def SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__=1.0 , lowercase__=None , lowercase__=None ) -> Optional[Any]:
if rng is None:
lowerCAmelCase__ : Union[str, Any] = global_rng
lowerCAmelCase__ : Dict = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , snake_case , snake_case=7 , snake_case=400 , snake_case=2_000 , snake_case=2_048 , snake_case=128 , snake_case=1 , snake_case=512 , snake_case=30 , snake_case=44_100 , ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = parent
lowerCAmelCase__ : Optional[int] = batch_size
lowerCAmelCase__ : Optional[Any] = min_seq_length
lowerCAmelCase__ : Optional[int] = max_seq_length
lowerCAmelCase__ : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCAmelCase__ : Dict = spectrogram_length
lowerCAmelCase__ : Any = feature_size
lowerCAmelCase__ : int = num_audio_channels
lowerCAmelCase__ : Optional[int] = hop_length
lowerCAmelCase__ : List[str] = chunk_length
lowerCAmelCase__ : Optional[Any] = sampling_rate
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def SCREAMING_SNAKE_CASE_ ( self , snake_case=False , snake_case=False ):
"""simple docstring"""
def _flatten(snake_case ):
return list(itertools.chain(*snake_case ) )
if equal_length:
lowerCAmelCase__ : Dict = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowerCAmelCase__ : Tuple = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase__ : Dict = [np.asarray(snake_case ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __a ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = TvltFeatureExtractor
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = TvltFeatureExtractionTester(self )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(snake_case , "spectrogram_length" ) )
self.assertTrue(hasattr(snake_case , "feature_size" ) )
self.assertTrue(hasattr(snake_case , "num_audio_channels" ) )
self.assertTrue(hasattr(snake_case , "hop_length" ) )
self.assertTrue(hasattr(snake_case , "chunk_length" ) )
self.assertTrue(hasattr(snake_case , "sampling_rate" ) )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : int = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ : Union[str, Any] = feat_extract_first.save_pretrained(snake_case )[0]
check_json_file_has_correct_format(snake_case )
lowerCAmelCase__ : Tuple = self.feature_extraction_class.from_pretrained(snake_case )
lowerCAmelCase__ : Union[str, Any] = feat_extract_first.to_dict()
lowerCAmelCase__ : int = feat_extract_second.to_dict()
lowerCAmelCase__ : Union[str, Any] = dict_first.pop("mel_filters" )
lowerCAmelCase__ : List[Any] = dict_second.pop("mel_filters" )
self.assertTrue(np.allclose(snake_case , snake_case ) )
self.assertEqual(snake_case , snake_case )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ : List[Any] = os.path.join(snake_case , "feat_extract.json" )
feat_extract_first.to_json_file(snake_case )
lowerCAmelCase__ : List[Any] = self.feature_extraction_class.from_json_file(snake_case )
lowerCAmelCase__ : Optional[Any] = feat_extract_first.to_dict()
lowerCAmelCase__ : Dict = feat_extract_second.to_dict()
lowerCAmelCase__ : Tuple = dict_first.pop("mel_filters" )
lowerCAmelCase__ : Dict = dict_second.pop("mel_filters" )
self.assertTrue(np.allclose(snake_case , snake_case ) )
self.assertEqual(snake_case , snake_case )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase__ : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCAmelCase__ : Optional[int] = [np.asarray(snake_case ) for speech_input in speech_inputs]
# Test not batched input
lowerCAmelCase__ : Tuple = feature_extractor(np_speech_inputs[0] , return_tensors="np" , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
lowerCAmelCase__ : Optional[int] = feature_extractor(snake_case , return_tensors="np" , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
lowerCAmelCase__ : List[str] = feature_extractor(
snake_case , return_tensors="np" , sampling_rate=44_100 , mask_audio=snake_case ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
lowerCAmelCase__ : List[str] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowerCAmelCase__ : Tuple = np.asarray(snake_case )
lowerCAmelCase__ : List[Any] = feature_extractor(snake_case , return_tensors="np" , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def SCREAMING_SNAKE_CASE_ ( self , snake_case ):
"""simple docstring"""
lowerCAmelCase__ : str = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
lowerCAmelCase__ : int = ds.sort("id" ).select(range(snake_case ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : str = self._load_datasamples(1 )
lowerCAmelCase__ : Optional[Any] = TvltFeatureExtractor()
lowerCAmelCase__ : Dict = feature_extractor(snake_case , return_tensors="pt" ).audio_values
self.assertEquals(audio_values.shape , (1, 1, 192, 128) )
lowerCAmelCase__ : Dict = torch.tensor([[-0.3_032, -0.2_708], [-0.4_434, -0.4_007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , snake_case , atol=1e-4 ) )
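
The `floats_list` helper above returns nested Python lists rather than arrays; a quick shape sketch (values are random, only the shapes matter here):

# Sketch: floats_list((rows, cols)) -> rows lists of cols floats in [0, scale).
samples = floats_list((2, 3), scale=2.0)
assert len(samples) == 2 and len(samples[0]) == 3
batch = np.asarray(samples)  # shape (2, 3), the form the feature extractor consumes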
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
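
A hedged sketch of combining this module's exports, assuming `MaskedBertConfig` accepts BertConfig-style defaults (this constructor call is an assumption, not shown in the source):

# Sketch (assumed API): build a masked BERT classifier from a default config.
config = MaskedBertConfig()
model = MaskedBertForSequenceClassification(config)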
import json
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402


SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()

            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/test_dynamic_feature_extractor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            _ = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
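
The registration pattern exercised in the try/finally blocks above, as a standalone sketch (a hedged illustration reusing the test's own imports, not an additional test):

# Sketch: pairing a custom config with a custom feature extractor in the auto classes.
AutoConfig.register("custom", CustomConfig)
AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
# From here on, AutoFeatureExtractor.from_pretrained(...) resolves any checkpoint
# whose config carries the "custom" model_type to CustomFeatureExtractor.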
"""transformers-cli train command."""
import os
from argparse import ArgumentParser, Namespace

from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand


if not is_tf_available() and not is_torch_available():
    raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")

# TF training parameters
USE_XLA = False
USE_AMP = False


def train_command_factory(args: Namespace):
    """Factory function used to instantiate the training command from provided command line arguments."""
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")

        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
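
A hedged sketch of how the command above is wired into argparse and run; the CSV path and output directory are placeholders, not values from the source:

# Sketch: registering and invoking the train subcommand programmatically.
parser = ArgumentParser("transformers-cli")
subcommands = parser.add_subparsers()
TrainCommand.register_subcommand(subcommands)
args = parser.parse_args(["train", "--train_data", "train.csv", "--output", "./trained"])
args.func(args).run()  # set_defaults(func=train_command_factory) builds the TrainCommand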
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Dict = {'''vocab_file''': '''vocab.txt'''}
SCREAMING_SNAKE_CASE : List[str] = {
'''vocab_file''': {
'''openbmb/cpm-ant-10b''': '''https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt''',
},
}
SCREAMING_SNAKE_CASE : str = {
'''openbmb/cpm-ant-10b''': 1_0_2_4,
}
def __UpperCAmelCase ( snake_case_ : Optional[Any] ) -> List[str]:
"""simple docstring"""
_lowerCAmelCase = collections.OrderedDict()
with open(snake_case_ , """r""" , encoding="""utf-8""" ) as reader:
_lowerCAmelCase = reader.readlines()
for index, token in enumerate(snake_case_ ):
_lowerCAmelCase = token.rstrip("""\n""" )
_lowerCAmelCase = index
return vocab
class __lowerCamelCase ( __lowercase ):
def __init__(self , lowerCamelCase , lowerCamelCase="<unk>" , lowerCamelCase=200 ):
'''simple docstring'''
_lowerCAmelCase = vocab
_lowerCAmelCase = unk_token
_lowerCAmelCase = max_input_chars_per_word
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = list(lowerCamelCase )
if len(lowerCamelCase ) > self.max_input_chars_per_word:
return [self.unk_token]
_lowerCAmelCase = 0
_lowerCAmelCase = []
while start < len(lowerCamelCase ):
_lowerCAmelCase = len(lowerCamelCase )
_lowerCAmelCase = None
while start < end:
_lowerCAmelCase = """""".join(chars[start:end] )
if substr in self.vocab:
_lowerCAmelCase = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(lowerCamelCase )
_lowerCAmelCase = end
return sub_tokens
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['input_ids', 'attention_mask']
__UpperCamelCase = False
def __init__(self , lowerCamelCase , lowerCamelCase="<d>" , lowerCamelCase="</d>" , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="<pad>" , lowerCamelCase="<unk>" , lowerCamelCase="</n>" , lowerCamelCase="</_>" , lowerCamelCase="left" , **lowerCamelCase , ):
'''simple docstring'''
requires_backends(self , ["""jieba"""] )
super().__init__(
bod_token=lowerCamelCase , eod_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , pad_token=lowerCamelCase , unk_token=lowerCamelCase , line_token=lowerCamelCase , space_token=lowerCamelCase , padding_side=lowerCamelCase , **lowerCamelCase , )
_lowerCAmelCase = bod_token
_lowerCAmelCase = eod_token
_lowerCAmelCase = load_vocab(lowerCamelCase )
_lowerCAmelCase = self.encoder[space_token]
_lowerCAmelCase = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
_lowerCAmelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase : x[1] ) )
_lowerCAmelCase = {v: k for k, v in self.encoder.items()}
_lowerCAmelCase = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def A__ (self ):
'''simple docstring'''
return self.encoder[self.bod_token]
@property
def A__ (self ):
'''simple docstring'''
return self.encoder[self.eod_token]
@property
def A__ (self ):
'''simple docstring'''
return self.encoder["\n"]
@property
def A__ (self ):
'''simple docstring'''
return len(self.encoder )
def A__ (self ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = []
for x in jieba.cut(lowerCamelCase , cut_all=lowerCamelCase ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCamelCase ) )
return output_tokens
def A__ (self , lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = [i for i in token_ids if i >= 0]
_lowerCAmelCase = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(lowerCamelCase , **lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return token in self.encoder
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return "".join(lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return self.encoder.get(lowerCamelCase , self.encoder.get(self.unk_token ) )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
return self.decoder.get(lowerCamelCase , self.unk_token )
def A__ (self , lowerCamelCase , lowerCamelCase = None ):
'''simple docstring'''
if os.path.isdir(lowerCamelCase ):
_lowerCAmelCase = os.path.join(
lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
_lowerCAmelCase = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
_lowerCAmelCase = 0
if " " in self.encoder:
_lowerCAmelCase = self.encoder[""" """]
del self.encoder[" "]
if "\n" in self.encoder:
_lowerCAmelCase = self.encoder["""\n"""]
del self.encoder["\n"]
_lowerCAmelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase : x[1] ) )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
""" Please check that the vocabulary is not corrupted!""" )
_lowerCAmelCase = token_index
writer.write(token + """\n""" )
index += 1
return (vocab_file,)
def A__ (self , lowerCamelCase , lowerCamelCase = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def A__ (self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase )
if token_ids_a is not None:
return [1] + ([0] * len(lowerCamelCase )) + [1] + ([0] * len(lowerCamelCase ))
return [1] + ([0] * len(lowerCamelCase ))
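
A small self-contained sketch of the greedy longest-match loop in `WordpieceTokenizer` above; the toy vocabulary and inputs are illustrative only:

# Sketch: longest-match-first segmentation with a toy vocab.
toy_vocab = {"un": 0, "want": 1, "wanted": 2, "ed": 3}
wp = WordpieceTokenizer(vocab=toy_vocab, unk_token="<unk>")
print(wp.tokenize("unwanted"))  # ['un', 'wanted'] - the longer match beats 'want' + 'ed'
print(wp.tokenize("unknown"))   # ['un', '<unk>', '<unk>', '<unk>', '<unk>', '<unk>'] - one <unk> per unmatched char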
"""Switch Transformers model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
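
The sparse-step rule above, restated as a standalone sketch mirroring the two branches (plain Python, no transformers install needed):

# Sketch: how the config derives the stride between sparse (MoE) layers.
def sparse_step(num_layers, num_sparse_layers):
    if num_sparse_layers > 0:
        return num_layers // num_sparse_layers
    return num_layers  # the HACK branch above: effectively creates 0 sparse layers

print(sparse_step(12, 3))  # 4 -> a sparse layer every 4 layers
print(sparse_step(12, 0))  # 12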
"""Fine-tuning the library models for TAPEX on table-based fact verification tasks."""

import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional

import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    BartForSequenceClassification,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    TapexTokenizer,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")

logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    label_to_id = {"Refused": 0, "Entailed": 1}
    id_to_label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)

        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
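
A standalone sketch of the table flattening inside `preprocess_tabfact_function` above; the example string follows the `col#col\nval#val` layout the code expects (the concrete values are illustrative):

import pandas as pd

# Sketch: TabFact-style table text -> pandas DataFrame, as in the preprocessing step.
table_text = "round#clubs remaining\nfirst round#156\nsecond round#78"
rows = [row.split("#") for row in table_text.strip("\n").split("\n")]
table = pd.DataFrame.from_records(rows[1:], columns=rows[0])
# columns: ['round', 'clubs remaining']; two data rows, ready for TapexTokenizer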
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class __snake_case ( _lowercase):
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Tuple = tempfile.mkdtemp()
_lowerCamelCase : List[Any] = 5
# Realm tok
_lowerCamelCase : Union[str, Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever

    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )

    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)

    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

            self.assertEqual(retriever.block_records[0], b"This is the first record")
| 83
|
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns empty story and summary lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 491
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCAmelCase_ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __SCREAMING_SNAKE_CASE ( UpperCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ['pixel_values']
def __init__( self , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = 1 / 2_55 , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = True , **SCREAMING_SNAKE_CASE__ , ):
"""simple docstring"""
super().__init__(**__a )
_snake_case : int = size if size is not None else {'shortest_edge': 2_24}
_snake_case : str = get_size_dict(__a , default_to_square=__a )
_snake_case : Tuple = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
_snake_case : str = get_size_dict(__a , default_to_square=__a , param_name="""crop_size""" )
_snake_case : Any = do_resize
_snake_case : Optional[int] = size
_snake_case : int = resample
_snake_case : List[Any] = do_center_crop
_snake_case : Tuple = crop_size
_snake_case : Union[str, Any] = do_rescale
_snake_case : int = rescale_factor
_snake_case : Tuple = do_normalize
_snake_case : Tuple = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_snake_case : Any = image_std if image_std is not None else OPENAI_CLIP_STD
_snake_case : int = do_convert_rgb
def __lowerCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
"""simple docstring"""
_snake_case : List[str] = get_size_dict(__a , default_to_square=__a )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
_snake_case : str = get_resize_output_image_size(__a , size=size["""shortest_edge"""] , default_to_square=__a )
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def __lowerCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
"""simple docstring"""
_snake_case : Union[str, Any] = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(__a , size=(size["""height"""], size["""width"""]) , data_format=__a , **__a )
def __lowerCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
"""simple docstring"""
return rescale(__a , scale=__a , data_format=__a , **__a )
def __lowerCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
"""simple docstring"""
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )
def __lowerCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ , ):
"""simple docstring"""
_snake_case : Optional[Any] = do_resize if do_resize is not None else self.do_resize
_snake_case : List[Any] = size if size is not None else self.size
_snake_case : Any = get_size_dict(__a , param_name="""size""" , default_to_square=__a )
_snake_case : Optional[int] = resample if resample is not None else self.resample
_snake_case : Any = do_center_crop if do_center_crop is not None else self.do_center_crop
_snake_case : str = crop_size if crop_size is not None else self.crop_size
_snake_case : Optional[int] = get_size_dict(__a , param_name="""crop_size""" , default_to_square=__a )
_snake_case : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
_snake_case : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
_snake_case : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
_snake_case : Tuple = image_mean if image_mean is not None else self.image_mean
_snake_case : Union[str, Any] = image_std if image_std is not None else self.image_std
_snake_case : Dict = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_snake_case : Any = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_snake_case : str = [convert_to_rgb(__a ) for image in images]
# All transformations expect numpy arrays.
_snake_case : List[Any] = [to_numpy_array(__a ) for image in images]
if do_resize:
_snake_case : Union[str, Any] = [self.resize(image=__a , size=__a , resample=__a ) for image in images]
if do_center_crop:
_snake_case : Union[str, Any] = [self.center_crop(image=__a , size=__a ) for image in images]
if do_rescale:
_snake_case : Any = [self.rescale(image=__a , scale=__a ) for image in images]
if do_normalize:
_snake_case : Optional[int] = [self.normalize(image=__a , mean=__a , std=__a ) for image in images]
_snake_case : int = [to_channel_dimension_format(__a , __a ) for image in images]
_snake_case : Tuple = {'pixel_values': images}
return BatchFeature(data=__a , tensor_type=__a )
| 717
|
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path=None,
        subfolder=None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes


def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)


@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )


def get_sqrt_alpha_prod(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
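# A minimal usage sketch for the schedule helpers above (illustrative only; the
# inputs and shapes below are assumptions, not part of this module):
#
#     betas = betas_for_alpha_bar(1000)                  # (1000,) cosine ("squaredcos_cap_v2") schedule
#     alphas_cumprod = jnp.cumprod(1.0 - betas, axis=0)
#     # broadcast per-timestep scalars against a (batch, channels, h, w) sample:
#     scale = broadcast_to_shape_from_left(alphas_cumprod[jnp.array([0, 1])], (2, 4, 64, 64))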
| 519
| 0
|
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, h(n) = n * (2 * n - 1)."""
    if not isinstance(length, int) or length <= 0:
        raise ValueError('Length must be a positive integer.')
    return [n * (2 * n - 1) for n in range(length)]
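# Worked example: the first five values of n * (2 * n - 1) for n = 0..4 are
#     hexagonal_numbers(5) == [0, 1, 6, 15, 28]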
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 105
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value):
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    """Sums the values of all nodes in a binary tree, exposed through `iter()`."""

    def __init__(self, tree):
        self.tree = tree

    def depth_first_search(self, node):
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
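# A minimal usage sketch for the classes above (the local names are
# illustrative assumptions, not part of the module):
#
#     root = Node(10)
#     root.left, root.right = Node(5), Node(-3)
#     total = next(iter(BinaryTreeNodeSum(root)))  # 10 + 5 + (-3) == 12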
| 444
| 0
|
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print('Googling.....')
    url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    res = requests.get(url, headers={'User-Agent': UserAgent().random})
    # res.raise_for_status()
    with open('project1a.html', 'wb') as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, 'html.parser')
    links = list(soup.select('.eZt8xd'))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get('href'))
        else:
            webbrowser.open(F"""https://google.com{link.get('href')}""")
| 666
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 666
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 7
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="np",
        )

        image = output.images

        assert image.shape == (1, 512, 768, 3)
| 408
| 0
|
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
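# Usage sketch (illustrative; the repo id and file path are assumptions):
#
#     url = hf_hub_url("squad", "plain_text/train.parquet", revision="main")
#     # -> "https://huggingface.co/datasets/squad/resolve/main/plain_text/train.parquet"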
| 707
|
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box) -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
| 113
| 0
|
'''simple docstring'''
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(expected) == sorted(result)
| 135
|
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 135
| 1
|
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50_267,
        max_position_embeddings=1_024,
        encoder_layers=12,
        encoder_ffn_dim=4_096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4_096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1_024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
| 714
|
"""simple docstring"""
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return indices [i, j] of two values in the sorted list `nums` summing to `target`."""
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{two_pointer([2, 7, 11, 15], 9) = }""")
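# Additional worked examples (note the input list must be sorted in ascending
# order for the two-pointer scan to be correct):
#     two_pointer([2, 7, 11, 15], 9)   -> [0, 1]
#     two_pointer([2, 7, 11, 15], 99)  -> []   (no pair sums to the target)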
| 2
| 0
|
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class a_ ( __lowercase ):
lowercase = ["""image_processor""", """tokenizer"""]
lowercase = """OwlViTImageProcessor"""
lowercase = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , SCREAMING_SNAKE_CASE_ , )
UpperCamelCase = kwargs.pop("""feature_extractor""" )
UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __call__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="max_length" , _SCREAMING_SNAKE_CASE="np" , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
"""You have to specify at least one text or query image or image. All three cannot be none.""" )
if text is not None:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or (isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and not isinstance(text[0] , SCREAMING_SNAKE_CASE_ )):
UpperCamelCase = [self.tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )]
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and isinstance(text[0] , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase = []
# Maximum number of queries across batch
UpperCamelCase = max([len(SCREAMING_SNAKE_CASE_ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(SCREAMING_SNAKE_CASE_ ) != max_num_queries:
UpperCamelCase = t + [""" """] * (max_num_queries - len(SCREAMING_SNAKE_CASE_ ))
UpperCamelCase = self.tokenizer(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
encodings.append(SCREAMING_SNAKE_CASE_ )
else:
raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
if return_tensors == "np":
UpperCamelCase = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
UpperCamelCase = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
UpperCamelCase = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
UpperCamelCase = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
UpperCamelCase = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 )
UpperCamelCase = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
UpperCamelCase = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
UpperCamelCase = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
else:
raise ValueError("""Target return tensor type could not be returned""" )
UpperCamelCase = BatchEncoding()
UpperCamelCase = input_ids
UpperCamelCase = attention_mask
if query_images is not None:
UpperCamelCase = BatchEncoding()
UpperCamelCase = self.image_processor(
SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).pixel_values
UpperCamelCase = query_pixel_values
if images is not None:
UpperCamelCase = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if text is not None and images is not None:
UpperCamelCase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
UpperCamelCase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE_ ) , tensor_type=SCREAMING_SNAKE_CASE_ )
def A__ ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
return self.image_processor.post_process(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def A__ ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
return self.image_processor.post_process_object_detection(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def A__ ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
return self.image_processor.post_process_image_guided_detection(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def A__ ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def A__ ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@property
def A__ ( self ) -> Dict:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , SCREAMING_SNAKE_CASE_ , )
return self.image_processor_class
@property
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , SCREAMING_SNAKE_CASE_ , )
return self.image_processor
| 301
|
"""simple docstring"""
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    """Approximate the area under the curve between x_start and x_end
    using the trapezoidal rule with the given number of steps."""
    xa = x_start
    fxa = fnc(x_start)
    area = 0.0

    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        xb = (x_end - x_start) / steps + xa
        fxb = fnc(xb)
        area += abs(fxb + fxa) * (xb - xa) / 2

        # Increment step
        xa = xb
        fxa = fxb

    return area


if __name__ == "__main__":

    def f(x: float) -> float:
        return x**3 + x**2

    print('f(x) = x^3 + x^2')
    print('The area between the curve, x = -5, x = 5 and the x axis is:')
    i = 10
    while i <= 100000:
        print(f'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''')
        i *= 10
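# Sanity check (a worked example added for illustration): because abs() is taken
# per trapezoid, the loop approximates the unsigned area between the curve and
# the x axis. For f(x) = x^3 + x^2 on [-5, 5] that is 1376/12 + 198 ≈ 312.67,
# which the printed values converge to as the step count grows.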
| 626
| 0
|
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
UpperCAmelCase_ : Any = logging.getLogger(__name__)
UpperCAmelCase_ : List[Any] = tf.data.AUTOTUNE
def UpperCAmelCase_ ( ):
'''simple docstring'''
_a : List[Any] = argparse.ArgumentParser(description='Train a masked language model on TPU.' )
parser.add_argument(
'--pretrained_model_config' , type=A , default='roberta-base' , help='The model config to use. Note that we don\'t copy the model\'s weights, only the config!' , )
parser.add_argument(
'--tokenizer' , type=A , default='unigram-tokenizer-wikitext' , help='The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.' , )
parser.add_argument(
'--per_replica_batch_size' , type=A , default=8 , help='Batch size per TPU core.' , )
parser.add_argument(
'--no_tpu' , action='store_true' , help='If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.' , )
parser.add_argument(
'--tpu_name' , type=A , help='Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.' , default='local' , )
parser.add_argument(
'--tpu_zone' , type=A , help='Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.' , )
parser.add_argument(
'--gcp_project' , type=A , help='Google cloud project name. Only used for non-Colab TPU nodes.' )
parser.add_argument(
'--bfloat16' , action='store_true' , help='Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.' , )
parser.add_argument(
'--train_dataset' , type=A , help='Path to training dataset to load. If the path begins with `gs://`'
' then the dataset will be loaded from a Google Cloud Storage bucket.' , )
parser.add_argument(
'--shuffle_buffer_size' , type=A , default=2**1_8 , help='Size of the shuffle buffer (in samples)' , )
parser.add_argument(
'--eval_dataset' , type=A , help='Path to evaluation dataset to load. If the path begins with `gs://`'
' then the dataset will be loaded from a Google Cloud Storage bucket.' , )
parser.add_argument(
'--num_epochs' , type=A , default=1 , help='Number of epochs to train for.' , )
parser.add_argument(
'--learning_rate' , type=A , default=1E-4 , help='Learning rate to use for training.' , )
parser.add_argument(
'--weight_decay_rate' , type=A , default=1E-3 , help='Weight decay rate to use for training.' , )
parser.add_argument(
'--max_length' , type=A , default=5_1_2 , help='Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py' , )
parser.add_argument(
'--mlm_probability' , type=A , default=0.15 , help='Fraction of tokens to mask during training.' , )
parser.add_argument('--output_dir' , type=A , required=A , help='Path to save model checkpoints to.' )
parser.add_argument('--hub_model_id' , type=A , help='Model ID to upload to on the Hugging Face Hub.' )
_a : Tuple = parser.parse_args()
return args
def UpperCAmelCase_ ( A ):
'''simple docstring'''
try:
if args.tpu_name:
_a : Any = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
_a : str = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
'Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '
'--gcp_project. When running on a TPU VM, use --tpu_name local.' )
tf.config.experimental_connect_to_cluster(A )
tf.tpu.experimental.initialize_tpu_system(A )
return tpu
def UpperCAmelCase_ ( A ):
'''simple docstring'''
_a : Union[str, Any] = 0
for file in file_list:
_a : int = file.split('/' )[-1]
_a : Any = re.search(r'-\d+-(\d+)\.tfrecord' , A ).group(1 )
_a : Dict = int(A )
num_samples += sample_count
return num_samples
def UpperCAmelCase_ ( A , A , A , A , A , A=None ):
'''simple docstring'''
_a : Union[str, Any] = count_samples(A )
_a : Optional[int] = tf.data.Dataset.from_tensor_slices(A )
if shuffle:
_a : Dict = dataset.shuffle(len(A ) )
_a : Optional[int] = tf.data.TFRecordDataset(A , num_parallel_reads=A )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
_a : Dict = dataset.apply(tf.data.experimental.assert_cardinality(A ) )
_a : str = dataset.map(A , num_parallel_calls=A )
if shuffle:
assert shuffle_buffer_size is not None
_a : Union[str, Any] = dataset.shuffle(args.shuffle_buffer_size )
_a : int = dataset.batch(A , drop_remainder=A )
_a : Optional[Any] = dataset.map(A , num_parallel_calls=A )
_a : int = dataset.prefetch(A )
return dataset
def UpperCAmelCase_ ( A ):
'''simple docstring'''
if not args.no_tpu:
_a : Union[str, Any] = initialize_tpu(A )
_a : Optional[int] = tf.distribute.TPUStrategy(A )
else:
_a : Any = tf.distribute.OneDeviceStrategy(device='/gpu:0' )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy('mixed_bfloat16' )
_a : List[Any] = AutoTokenizer.from_pretrained(args.tokenizer )
_a : int = AutoConfig.from_pretrained(args.pretrained_model_config )
_a : Union[str, Any] = tokenizer.vocab_size
_a : List[str] = tf.io.gfile.glob(os.path.join(args.train_dataset , '*.tfrecord' ) )
if not training_records:
raise ValueError(f'''No .tfrecord files found in {args.train_dataset}.''' )
_a : Tuple = tf.io.gfile.glob(os.path.join(args.eval_dataset , '*.tfrecord' ) )
if not eval_records:
raise ValueError(f'''No .tfrecord files found in {args.eval_dataset}.''' )
_a : Optional[int] = count_samples(A )
_a : Union[str, Any] = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
_a : Optional[int] = steps_per_epoch * args.num_epochs
with strategy.scope():
_a : Union[str, Any] = TFAutoModelForMaskedLM.from_config(A )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
_a , _a : str = create_optimizer(
num_train_steps=A , num_warmup_steps=total_train_steps // 2_0 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=A , metrics=['accuracy'] )
def decode_fn(A ):
_a : int = {
'input_ids': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
'attention_mask': tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(A , A )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
_a : Union[str, Any] = DataCollatorForLanguageModeling(
tokenizer=A , mlm_probability=args.mlm_probability , mlm=A , return_tensors='tf' )
def mask_with_collator(A ):
# TF really needs an isin() function
_a : str = (
~tf.cast(batch['attention_mask'] , tf.bool )
| (batch['input_ids'] == tokenizer.cls_token_id)
| (batch['input_ids'] == tokenizer.sep_token_id)
)
_a , _a : str = data_collator.tf_mask_tokens(
batch['input_ids'] , vocab_size=len(A ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=A , )
return batch
_a : Tuple = args.per_replica_batch_size * strategy.num_replicas_in_sync
_a : List[str] = prepare_dataset(
A , decode_fn=A , mask_fn=A , batch_size=A , shuffle=A , shuffle_buffer_size=args.shuffle_buffer_size , )
_a : Dict = prepare_dataset(
A , decode_fn=A , mask_fn=A , batch_size=A , shuffle=A , )
_a : Tuple = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=A ) )
model.fit(
A , validation_data=A , epochs=args.num_epochs , callbacks=A , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = parse_args()
main(args)
| 424
|
'''simple docstring'''
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Normality = molarity * n-factor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law (PV = nRT) solved for pressure, with R = 0.0821 L·atm/(mol·K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for volume."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law solved for temperature."""
    return round(float((pressure * volume) / (0.0821 * moles)))
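# Worked examples (assumed inputs, for illustration only):
#     moles_to_pressure(volume=0.82, moles=3, temperature=300)               # round(3 * 0.0821 * 300 / 0.82) == 90
#     pressure_and_volume_to_temperature(pressure=90, moles=3, volume=0.82)  # == 300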
if __name__ == "__main__":
import doctest
doctest.testmod()
| 424
| 1
|
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 276
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
_A : Optional[int] = ['image_processor', 'tokenizer']
_A : List[Any] = 'LayoutLMv2ImageProcessor'
_A : Dict = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , **lowerCamelCase ):
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowerCamelCase , )
snake_case__ = kwargs.pop("feature_extractor" )
snake_case__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowerCamelCase , lowerCamelCase )
def __call__( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = 0 , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = False , lowerCamelCase = False , lowerCamelCase = False , lowerCamelCase = False , lowerCamelCase = True , lowerCamelCase = None , **lowerCamelCase , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes "
"if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"] , text_pair=text_pair , boxes=boxes if boxes is not None else features["boxes"] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop("pixel_values" )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs["overflow_to_sample_mapping"] )
        encoded_inputs["image"] = images
        return encoded_inputs
    def get_overflowing_images(self , images , overflow_to_sample_mapping ):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}" )
        return images_with_overflow

    def batch_decode(self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode(self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names(self ):
        return ["input_ids", "bbox", "attention_mask", "image"]
    @property
    def feature_extractor_class(self ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor(self ):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
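# Hedged usage sketch (not part of the original file): how this processor could
# be driven end to end. The checkpoint name "microsoft/layoutxlm-base" and the
# local file "document.png" are assumptions for illustration only.
#
# from transformers import LayoutXLMProcessor
# from PIL import Image
#
# processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
# image = Image.open("document.png").convert("RGB")
# encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor
# print(encoding.keys())  # expected: input_ids, bbox, attention_mask, image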
| 276
| 1
|
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """simple docstring"""

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)" )
    return img.point(brightness )


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg" ) as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 1_00 )
        bright_img.save("image_data/lena_brightness.png" , format="png" )
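# Hedged aside (not in the original file): since 128 + level + (c - 128)
# simplifies to c + level, an equivalent one-liner sketch would be:
#
# def change_brightness_simple(img: Image, level: float) -> Image:
#     return img.point(lambda c: c + level)
#
# For 8-bit modes, PIL builds a lookup table from the function and clamps the
# mapped values into the valid 0-255 range.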
| 201
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class lowerCAmelCase_ ( ProcessorMixin ):
    """simple docstring"""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self , image_processor , feature_extractor ):
        '''simple docstring'''
        super().__init__(image_processor=image_processor , feature_extractor=feature_extractor )
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(self , images=None , audio=None , images_mixed=None , sampling_rate=None , mask_audio=False , mask_pixel=False , *args , **kwargs , ):
        '''simple docstring'''
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process." )
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images , mask_pixel=mask_pixel , *args , **kwargs )
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed , is_mixed=True , *args , **kwargs )
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio , *args , sampling_rate=sampling_rate , mask_audio=mask_audio , **kwargs )
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict )
        if images is not None:
            output_dict.update(images_dict )
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict )
        return output_dict

    @property
    def model_input_names(self ):
        '''simple docstring'''
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
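# Hedged usage sketch (not part of the original file); the checkpoint name
# "ZinengTang/tvlt-base" is an assumption taken from the TVLT model card, and
# the dummy inputs are placeholders.
#
# import numpy as np
# from transformers import TvltProcessor
#
# processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
# video = list(np.random.rand(8, 3, 224, 224))   # 8 dummy frames
# audio = np.random.rand(10_000)                 # dummy waveform
# inputs = processor(images=video, audio=audio, sampling_rate=44_100)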
| 201
| 1
|
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def rename_keys( s_dict ):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys() )
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer , key ):
            new_key = re.sub(r"layers_(\d+)" , r"block/\1/layer" , new_key )
        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer , key ):
            groups = re.match(layer_to_block_of_layer , new_key ).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/" , r"/1/mlp/" , new_key )
                new_key = re.sub(r"/pre_mlp_layer_norm/" , r"/1/layer_norm/" , new_key )
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/" , r"/2/mlp/" , new_key )
                new_key = re.sub(r"/pre_mlp_layer_norm/" , r"/2/layer_norm/" , new_key )
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key , temp_key )
        print(f"{key} -> {new_key}" )
        s_dict[new_key] = s_dict.pop(key )
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys() ):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts ):
                # NOTE: the replacement target below is a reconstruction; the
                # original nested f-string was garbled in this copy.
                s_dict[key.replace("expert/" , f"experts/expert_{idx}/" )] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}" )
            s_dict.pop(key )
    return s_dict
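# Hedged mini-demo (not part of the original script): exercising only the pure
# renaming path of `rename_keys` on a toy dict of numpy arrays.
#
# import numpy as np
# toy = {"encoder/layers_0/attention/query/kernel": np.zeros((4, 4))}
# print(list(rename_keys(toy)))
# # expected: ['encoder/block/0/layer/0/SelfAttention/q/kernel']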
GIN_TO_CONFIG_MAPPING = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config( gin_file , num_experts ):
    # Convert a google style config to the hugging face format
    import regex as re

    with open(gin_file , "r" ) as f:
        raw_gin = f.read()
    regex_match = re.findall(r"(.*) = ([0-9.]*)" , raw_gin )
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value ) if "." in value else int(value )
    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)" , raw_gin )[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1] )
    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args )
    return config
def convert_flax_checkpoint_to_pytorch( flax_checkpoint_path , config_file , gin_file=None , pytorch_dump_path="./" , num_experts=8 ):
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}" )
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path )
    if gin_file is not None:
        config = convert_gin_to_config(gin_file , num_experts )
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file )
    pt_model = SwitchTransformersForConditionalGeneration(config )
    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params , sep="/" )
    flax_params = rename_keys(flax_params )
    flax_params = unflatten_dict(flax_params , sep="/" )
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model , flax_params )
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    pt_model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
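# Hedged invocation sketch (not part of the original script); the script name
# and paths are placeholders, not real checkpoints:
#
# python convert_switch_transformers_flax_checkpoint_to_pytorch.py \
#     --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#     --gin_file /path/to/operative_config.gin \
#     --pytorch_dump_folder_path ./switch-base-8-converted \
#     --num_experts 8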
| 684
|
def __UpperCamelCase ( num ):
    if isinstance(num , float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if isinstance(num , str ):
        raise TypeError("'str' object cannot be interpreted as an integer" )
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        binary.insert(0 , num % 2 )
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e ) for e in binary )
    return "0b" + "".join(str(e ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
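    # Hedged self-check (not in the original module): the converter mirrors the
    # built-in bin() for integers.
    assert __UpperCamelCase(10 ) == bin(10 ) == "0b1010"
    assert __UpperCamelCase(-10 ) == bin(-10 ) == "-0b1010"
    assert __UpperCamelCase(0 ) == bin(0 ) == "0b0"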
| 431
| 0
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
    def test_gelu_versions(self ):
        """simple docstring"""
        x = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
        torch_builtin = get_activation("gelu" )
        self.assertTrue(torch.allclose(gelu_python(x ) , torch_builtin(x ) ) )
        self.assertFalse(torch.allclose(gelu_python(x ) , gelu_new(x ) ) )

    def test_gelu_10(self ):
        """simple docstring"""
        x = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
        torch_builtin = get_activation("gelu" )
        gelu10 = get_activation("gelu_10" )
        y_gelu = torch_builtin(x )
        y_gelu_10 = gelu10(x )
        clipped_mask = torch.where(y_gelu_10 < 10.0 , 1 , 0 )
        self.assertTrue(torch.max(y_gelu_10 ).item() == 10.0 )
        self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_10 * clipped_mask ) )

    def test_get_activation(self ):
        """simple docstring"""
        get_activation("gelu" )
        get_activation("gelu_10" )
        get_activation("gelu_fast" )
        get_activation("gelu_new" )
        get_activation("gelu_python" )
        get_activation("gelu_pytorch_tanh" )
        get_activation("linear" )
        get_activation("mish" )
        get_activation("quick_gelu" )
        get_activation("relu" )
        get_activation("sigmoid" )
        get_activation("silu" )
        get_activation("swish" )
        get_activation("tanh" )
        with self.assertRaises(KeyError ):
            get_activation("bogus" )
        with self.assertRaises(KeyError ):
            get_activation(None )

    def test_activations_are_distinct_objects(self ):
        """simple docstring"""
        act1 = get_activation("gelu" )
        act1.a = 1
        act2 = get_activation("gelu" )
        self.assertEqual(act1.a , 1 )
        with self.assertRaises(AttributeError ):
            _ = act2.a
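# Hedged aside (not in the original test): `gelu_10` behaves like gelu with
# outputs clipped into [-10, 10], which is why the test above checks that its
# maximum equals exactly 10.0; an equivalent sketch:
#
# import torch
# def gelu_10_sketch(x: torch.Tensor) -> torch.Tensor:
#     return torch.clip(torch.nn.functional.gelu(x), min=-10.0, max=10.0)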
| 714
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__snake_case : str =logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCamelCase__ ( BaseImageProcessor ):
    '''simple docstring'''

    model_input_names = ["pixel_values"]

    def __init__(self , do_resize=True , size=None , resample=PILImageResampling.BICUBIC , do_center_crop=True , crop_size=None , do_rescale=True , rescale_factor=1 / 2_55 , do_normalize=True , image_mean=None , image_std=None , do_convert_rgb=True , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 2_24}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self , image , size , resample=PILImageResampling.BICUBIC , data_format=None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
        output_size = get_resize_output_image_size(image , size=size["shortest_edge"] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )

    def center_crop(self , image , size , data_format=None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )

    def rescale(self , image , scale , data_format=None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize(self , image , mean , std , data_format=None , **kwargs , ) -> np.ndarray:
        """simple docstring"""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )

    def preprocess(self , images , do_resize=None , size=None , resample=None , do_center_crop=None , crop_size=None , do_rescale=None , rescale_factor=None , do_normalize=None , image_mean=None , image_std=None , do_convert_rgb=None , return_tensors=None , data_format=ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name="size" , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" , default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
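# Hedged usage sketch (not part of the original file): running the processor
# on a dummy channels-first image; the CLIP defaults (224px shortest edge,
# OPENAI mean/std) come from __init__ above.
#
# import numpy as np
# processor = lowerCamelCase__()  # keeps the file's obfuscated class name
# dummy = (np.random.rand(3, 256, 256) * 255).astype("uint8")
# batch = processor.preprocess(dummy, return_tensors="np")
# print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)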
| 90
| 0
|
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
def get_module_path( test_file ):
    components = test_file.split(os.path.sep )
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead." )
    test_fn = components[-1]
    if not test_fn.endswith("py" ):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead." )
    if not test_fn.startswith("test_modeling_" ):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead." )
    components = components[:-1] + [test_fn.replace(".py" , "" )]
    test_module_path = ".".join(components )
    return test_module_path


def get_test_module( test_file ):
    test_module_path = get_module_path(test_file )
    test_module = importlib.import_module(test_module_path )
    return test_module


def get_tester_classes( test_file ):
    tester_classes = []
    test_module = get_test_module(test_file )
    for attr in dir(test_module ):
        if attr.endswith("ModelTester" ):
            tester_classes.append(getattr(test_module , attr ) )
    # sort with class names
    return sorted(tester_classes , key=lambda x: x.__name__ )


def get_test_classes( test_file ):
    test_classes = []
    test_module = get_test_module(test_file )
    for attr in dir(test_module ):
        test_class = getattr(test_module , attr )
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class , "all_model_classes" , [] )
        if len(model_classes ) > 0:
            test_classes.append(test_class )
    # sort with class names
    return sorted(test_classes , key=lambda x: x.__name__ )


def get_model_classes( test_file ):
    test_classes = get_test_classes(test_file )
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes )
    # sort with class names
    return sorted(model_classes , key=lambda x: x.__name__ )


def get_model_tester_from_test_class( test_class ):
    test = test_class()
    if hasattr(test , "setUp" ):
        test.setUp()
    model_tester = None
    if hasattr(test , "model_tester" ):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model( test_file , model_class ):
    test_classes = get_test_classes(test_file )
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class )
    # sort with class names
    return sorted(target_test_classes , key=lambda x: x.__name__ )


def get_tester_classes_for_model( test_file , model_class ):
    test_classes = get_test_classes_for_model(test_file , model_class )
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class )
        if tester_class is not None:
            tester_classes.append(tester_class )
    # sort with class names
    return sorted(tester_classes , key=lambda x: x.__name__ )


def get_test_to_tester_mapping( test_file ):
    test_classes = get_test_classes(test_file )
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class ) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping( test_file ):
    model_classes = get_model_classes(test_file )
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping( test_file ):
    model_classes = get_model_classes(test_file )
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json( o ):
    if isinstance(o , str ):
        return o
    elif isinstance(o , type ):
        return o.__name__
    elif isinstance(o , (list, tuple) ):
        return [to_json(x ) for x in o]
    elif isinstance(o , dict ):
        return {to_json(k ): to_json(v ) for k, v in o.items()}
    else:
        return o
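# Hedged usage sketch (not part of the original module), assumed to be run from
# the root of the transformers repo; the module path is an assumption:
#
# from utils.get_test_info import get_model_to_tester_mapping, to_json
# mapping = get_model_to_tester_mapping("tests/models/bert/test_modeling_bert.py")
# print(to_json(mapping))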
| 64
|
def factorial( num ):
    fact = 1
    for i in range(1 , num + 1 ):
        fact *= i
    return fact


def split_and_add( number ):
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution( num = 100 ):
    nfact = factorial(num )
    result = split_and_add(nfact )
    return result
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
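    # Hedged worked example (not in the original solution): 10! = 3628800 and
    # 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so solution(10) is 27.
    assert solution(10 ) == 27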
| 112
| 0
|
'''simple docstring'''
class Node:
    def __init__(self , data , previous=None , next_node=None ):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self ):
        return f"{self.data}"

    def get_data(self ):
        return self.data

    def get_next(self ):
        return self.next

    def get_previous(self ):
        return self.previous


class LinkedListIterator:
    def __init__(self , head ):
        self.current = head

    def __iter__(self ):
        return self

    def __next__(self ):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self ):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self ):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data() )
            current = current.get_next()
        return " ".join(str(node ) for node in nodes )

    def __contains__(self , value ):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self ):
        return LinkedListIterator(self.head )

    def get_head_data(self ):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self ):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self , node ):
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head , node )

    def set_tail(self , node ):
        if self.head is None:
            self.set_head(node )
        else:
            self.insert_after_node(self.tail , node )

    def insert(self , value ):
        node = Node(value )
        if self.head is None:
            self.set_head(node )
        else:
            self.set_tail(node )

    def insert_before_node(self , node , node_to_insert ):
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node(self , node , node_to_insert ):
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position(self , position , value ):
        current_position = 1
        new_node = Node(value )
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node , new_node )
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail , new_node )

    def get_node(self , item ):
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found" )

    def delete_value(self , value ):
        if (node := self.get_node(value )) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node )

    @staticmethod
    def remove_node_pointers(node ):
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self ):
        return self.head is None


def create_linked_list() -> None:
    '''simple docstring'''


if __name__ == "__main__":
    import doctest

    doctest.testmod()
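    # Hedged usage demo (not part of the original module), exercising the
    # public API of the reconstructed LinkedList.
    ll = LinkedList()
    for value in (1, 2, 3):
        ll.insert(value )
    assert str(ll ) == "1 2 3"
    assert 2 in ll
    ll.delete_value(2 )
    assert str(ll ) == "1 3"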
| 415
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_bloom': ['BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BloomConfig', 'BloomOnnxConfig'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST',
'BloomForCausalLM',
'BloomModel',
'BloomPreTrainedModel',
'BloomForSequenceClassification',
'BloomForTokenClassification',
'BloomForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
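# Hedged aside (not part of the original file): `_LazyModule` swaps the module
# object in `sys.modules` so heavy submodules are only imported on first
# attribute access; a toy equivalent of the idea:
#
# import importlib, sys, types
# class _Lazy(types.ModuleType):
#     def __init__(self, name, structure):
#         super().__init__(name)
#         self._attr_to_module = {v: k for k, vs in structure.items() for v in vs}
#     def __getattr__(self, attr):
#         module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
#         return getattr(module, attr)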
| 415
| 1
|
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results( output_dir ):
    '''simple docstring'''
    results = {}
    path = os.path.join(output_dir , "all_results.json" )
    if os.path.exists(path ):
        with open(path , "r" ) as f:
            results = json.load(f )
    else:
        raise ValueError(f"can't find {path}" )
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class lowercase__ ( TestCasePlus ):
    def test_run_glue(self ):
        """simple docstring"""
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys , "argv" , testargs ):
            start = time()
            xla_spawn.main()
            end = time()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5 )
            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start , 5_00 )
    def test_trainer_tpu(self ):
        """simple docstring"""
        import xla_spawn

        testargs = "\n    ./tests/test_trainer_tpu.py\n    --num_cores=8\n    ./tests/test_trainer_tpu.py\n    ".split()
        with patch.object(sys , "argv" , testargs ):
            xla_spawn.main()
| 475
|
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
| 475
| 1
|
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class lowerCamelCase :
    def _get_dummy_components(self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
        torch.manual_seed(0 )
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
        unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
        torch.manual_seed(0 )
        scheduler = DDPMScheduler(
            num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0_001 , beta_end=0.02 , thresholding=True , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
        torch.manual_seed(0 )
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _get_superresolution_dummy_components(self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
        torch.manual_seed(0 )
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , )
        unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
        torch.manual_seed(0 )
        scheduler = DDPMScheduler(
            num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0_001 , beta_end=0.02 , thresholding=True , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
        torch.manual_seed(0 )
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0_001 , beta_end=0.02 , )
        torch.manual_seed(0 )
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components(self ):
        '''simple docstring'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]
        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None
        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None
        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None
        prompt_embeds , negative_prompt_embeds = pipe.encode_prompt(prompt )
        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe , optional_component , None )
        output = pipe(**inputs )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
        pipe_loaded.to(torch_device )
        pipe_loaded.set_progress_bar_config(disable=None )
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded , optional_component ) is None , f"`{optional_component}` did not stay set to None after loading." , )
        inputs = self.get_dummy_inputs(torch_device )
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]
        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image
        output_loaded = pipe_loaded(**inputs )[0]
        max_diff = np.abs(to_np(output ) - to_np(output_loaded ) ).max()
        self.assertLess(max_diff , 1e-4 )
    def _test_save_load_local(self ):
        '''simple docstring'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        output = pipe(**inputs )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
        pipe_loaded.to(torch_device )
        pipe_loaded.set_progress_bar_config(disable=None )
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
        inputs = self.get_dummy_inputs(torch_device )
        output_loaded = pipe_loaded(**inputs )[0]
        max_diff = np.abs(to_np(output ) - to_np(output_loaded ) ).max()
        self.assertLess(max_diff , 1e-4 )
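# Hedged aside (not part of the original file): this mixin expects a concrete
# test case to supply `pipeline_class`, `get_dummy_components`, and
# `get_dummy_inputs`; a hypothetical pairing could look like:
#
# class IFPipelineFastTests(lowerCamelCase, unittest.TestCase):
#     pipeline_class = IFPipeline
#     def get_dummy_components(self):
#         return self._get_dummy_components()
#     def get_dummy_inputs(self, device):
#         ...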
| 273
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 273
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class MarkupLMConfig( PretrainedConfig ):
    model_type = "markuplm"

    def __init__(self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , bos_token_id=0 , eos_token_id=2 , max_xpath_tag_unit_embeddings=256 , max_xpath_subs_unit_embeddings=1024 , tag_pad_id=216 , subs_pad_id=1001 , xpath_unit_hidden_size=32 , max_depth=50 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 202
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/data2vec-vision-base-ft""": (
"""https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
),
}
class Data2VecVisionConfig( PretrainedConfig ):
    model_type = "data2vec-vision"

    def __init__(self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=255 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs(self ):
        """simple docstring"""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation(self ):
        """simple docstring"""
        return 1e-4
| 202
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig( PretrainedConfig ):
    model_type = "beit"

    def __init__(self , vocab_size=8_192 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=255 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs(self ):
        """simple docstring"""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation(self ):
        """simple docstring"""
        return 1e-4
| 712
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class a ( unittest.TestCase ):
    def test_input_types(self ):
        """simple docstring"""
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset )
        self.assertTrue(isinstance(dc.token_ids , list ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )

    def test_check_illegal_input(self ):
        """simple docstring"""
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(cset )  # fails here

    def test_example_progression(self ):
        """simple docstring"""
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset )
        stepped, completed, reset = dc.update(1 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        stepped, completed, reset = dc.update(2 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped, completed, reset = dc.update(3 )
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )

    def test_example_progression_unequal_three_mid_and_reset(self ):
        """simple docstring"""
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset )
        stepped, completed, reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        stepped, completed, reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped, completed, reset = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        stepped, completed, reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        dc.reset()
        stepped, completed, reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        stepped, completed, reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped, completed, reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
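# Hedged usage sketch (not part of the original test): disjunctive constraints
# are exposed through `model.generate` via `force_words_ids` as nested lists;
# the "t5-small" checkpoint and prompt are assumptions for illustration.
#
# from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
# tok = AutoTokenizer.from_pretrained("t5-small")
# model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
# force_words_ids = [tok(["park", "garden"], add_special_tokens=False).input_ids]
# inputs = tok("translate English to German: The kids play outside.", return_tensors="pt")
# out = model.generate(**inputs, force_words_ids=force_words_ids, num_beams=4)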
| 376
| 0
|
import argparse
import json
import subprocess
def get_runner_status( target_runners , token ):
    """simple docstring"""
    offline_runners = []
    cmd = (
        f"curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd , shell=True , stdout=subprocess.PIPE )
    o = output.stdout.decode("utf-8" )
    status = json.loads(o )
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner )
    # save the result so we can report them on Slack
    with open("offline_runners.txt" , "w" ) as fp:
        fp.write(json.dumps(offline_runners ) )
    if len(offline_runners ) > 0:
        failed = "\n".join([x["name"] for x in offline_runners] )
        raise ValueError(f"The following runners are offline:\n{failed}" )
if __name__ == "__main__":
    def list_str( values ):
        """simple docstring"""
        return values.split("," )
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--target_runners",
default=None,
type=list_str,
required=True,
help="Comma-separated list of runners to check status.",
)
parser.add_argument(
"--token", default=None, type=str, required=True, help="A token that has actions:read permission."
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
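# Hedged invocation sketch (not part of the original script); runner names,
# the script filename, and the token are placeholders:
#
# python check_self_hosted_runner.py \
#     --target_runners single-gpu-runner,multi-gpu-runner \
#     --token $GITHUB_TOKEN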
| 637
|
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, backbone_config=backbone_config, backbone_featmap_shape=self.backbone_featmap_shape)

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)

            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    """Load the standard COCO test image used by the integration test below."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
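# Minimal inference sketch mirroring the integration test above (checkpoint id
# taken from the test; everything else is standard image-to-depth plumbing):
# processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
# model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas")
# inputs = processor(images=Image.open("photo.png"), return_tensors="pt")
# with torch.no_grad():
#     depth = model(**inputs).predicted_depth  # (batch, height, width)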
| 319
| 0
|
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model: it also needs a next_sentence_label
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    """Build a LongTensor of token ids on the current test device."""
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
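# Relative-tolerance sketch: with outputs spanning ~1e0 to ~1e8, a single
# absolute atol cannot fit every element, so the test checks the elementwise
# ratio instead: 1 - TOLERANCE < expected / output < 1 + TOLERANCE.
# For example, expected 1e8 vs output 1.0005e8 gives a ratio of ~0.9995, which
# passes at TOLERANCE = 1e-3 even though the absolute error is ~5e4.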
| 258
|
"""simple docstring"""
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
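# The line above is a quine: %r re-embeds the template string (quotes included)
# and the doubled %% collapses back to a single %, so running the program
# prints its own source exactly.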
| 258
| 1
|
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments
_UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    """
    Training arguments extended with sequence-to-sequence specific options.
    """

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
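# Usage sketch (assumed wiring): because these fields extend TrainingArguments,
# they parse like any other CLI flag via HfArgumentParser, e.g.
# parser = HfArgumentParser(Seq2SeqTrainingArguments)
# (training_args,) = parser.parse_args_into_dataclasses()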
| 558
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
def _lowercase ( self ) -> Tuple:
lowerCamelCase : Tuple = "sshleifer/tiny-gpt2"
lowerCamelCase : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
lowerCamelCase : Dict = PyTorchBenchmark(UpperCamelCase__ )
lowerCamelCase : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowercase ( self ) -> int:
lowerCamelCase : List[str] = "sgugger/tiny-distilbert-classification"
lowerCamelCase : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , only_pretrain_model=UpperCamelCase__ , )
lowerCamelCase : Tuple = PyTorchBenchmark(UpperCamelCase__ )
lowerCamelCase : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowercase ( self ) -> int:
lowerCamelCase : Dict = "sshleifer/tiny-gpt2"
lowerCamelCase : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , torchscript=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
lowerCamelCase : Union[str, Any] = PyTorchBenchmark(UpperCamelCase__ )
lowerCamelCase : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def _lowercase ( self ) -> Optional[Any]:
lowerCamelCase : Any = "sshleifer/tiny-gpt2"
lowerCamelCase : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , fpaa=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
lowerCamelCase : Optional[int] = PyTorchBenchmark(UpperCamelCase__ )
lowerCamelCase : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowercase ( self ) -> Optional[int]:
lowerCamelCase : int = "sshleifer/tiny-gpt2"
lowerCamelCase : List[str] = AutoConfig.from_pretrained(UpperCamelCase__ )
# set architectures equal to `None`
lowerCamelCase : Union[str, Any] = None
lowerCamelCase : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
lowerCamelCase : List[Any] = PyTorchBenchmark(UpperCamelCase__ , configs=[config] )
lowerCamelCase : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowercase ( self ) -> str:
lowerCamelCase : Optional[int] = "sshleifer/tiny-gpt2"
lowerCamelCase : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
lowerCamelCase : int = PyTorchBenchmark(UpperCamelCase__ )
lowerCamelCase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == "cpu" , "Can't do half precision" )
def _lowercase ( self ) -> Dict:
lowerCamelCase : List[str] = "sshleifer/tiny-gpt2"
lowerCamelCase : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
lowerCamelCase : Dict = PyTorchBenchmark(UpperCamelCase__ )
lowerCamelCase : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowercase ( self ) -> Union[str, Any]:
lowerCamelCase : Union[str, Any] = "sshleifer/tiny-gpt2"
lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(UpperCamelCase__ )
lowerCamelCase : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
lowerCamelCase : Dict = PyTorchBenchmark(UpperCamelCase__ , configs=[config] )
lowerCamelCase : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowercase ( self ) -> int:
lowerCamelCase : Tuple = "sshleifer/tinier_bart"
lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained(UpperCamelCase__ )
lowerCamelCase : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
lowerCamelCase : Any = PyTorchBenchmark(UpperCamelCase__ , configs=[config] )
lowerCamelCase : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowercase ( self ) -> Any:
lowerCamelCase : List[Any] = "sshleifer/tiny-gpt2"
lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained(UpperCamelCase__ )
lowerCamelCase : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
lowerCamelCase : int = PyTorchBenchmark(UpperCamelCase__ , configs=[config] )
lowerCamelCase : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowercase ( self ) -> Dict:
lowerCamelCase : List[str] = "sshleifer/tinier_bart"
lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained(UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
lowerCamelCase : Tuple = PyTorchBenchmark(UpperCamelCase__ , configs=[config] )
lowerCamelCase : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowercase ( self ) -> Union[str, Any]:
lowerCamelCase : Optional[Any] = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , save_to_csv=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(UpperCamelCase__ , "inf_time.csv" ) , train_memory_csv_file=os.path.join(UpperCamelCase__ , "train_mem.csv" ) , inference_memory_csv_file=os.path.join(UpperCamelCase__ , "inf_mem.csv" ) , train_time_csv_file=os.path.join(UpperCamelCase__ , "train_time.csv" ) , env_info_csv_file=os.path.join(UpperCamelCase__ , "env.csv" ) , multi_process=UpperCamelCase__ , )
lowerCamelCase : str = PyTorchBenchmark(UpperCamelCase__ )
benchmark.run()
self.assertTrue(Path(os.path.join(UpperCamelCase__ , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCamelCase__ , "train_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCamelCase__ , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCamelCase__ , "train_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCamelCase__ , "env.csv" ) ).exists() )
def _lowercase ( self ) -> Dict:
lowerCamelCase : Union[str, Any] = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(UpperCamelCase__ ):
self.assertTrue(hasattr(UpperCamelCase__ , "sequential" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "cumulative" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "current" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(UpperCamelCase__ , "log.txt" ) , log_print=UpperCamelCase__ , trace_memory_line_by_line=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
lowerCamelCase : Any = PyTorchBenchmark(UpperCamelCase__ )
lowerCamelCase : Dict = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(UpperCamelCase__ , "log.txt" ) ).exists() )
| 311
| 0
|
"""simple docstring"""
def is_pentagonal(n: int) -> bool:
    """True if n is a pentagonal number, i.e. n = m(3m - 1)/2 for some integer m."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Project Euler 44: return the smallest difference D = P_j - P_i such that
    both the sum and the difference of the pentagonal pair are pentagonal."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
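# Inverse-formula sanity check: solving P = m(3m - 1)/2 for m gives
# m = (1 + sqrt(1 + 24P)) / 6, which is a positive integer exactly when P is
# pentagonal. The first pentagonal numbers are 1, 5, 12, 22, 35, ...
assert all(is_pentagonal(p) for p in (1, 5, 12, 22, 35))
assert not is_pentagonal(6)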
if __name__ == "__main__":
print(f'''{solution() = }''')
| 293
|
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCamelCase : Tuple = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
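# The onnx.json file read above is assumed to map opset numbers to lists of
# supported op names, e.g. {"opsets": {"1": ["Abs", "Add", ...], "2": [...]}},
# which is why ops for opsets 1..opset are accumulated before the comparison.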
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=1_2, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 293
| 1
|
"""simple docstring"""
def solution() -> int:
    """Concatenate the positive integers into Champernowne's constant and
    multiply the digits at the 1st, 10th, ..., 1,000,000th positions."""
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
if __name__ == "__main__":
print(solution())
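# Project Euler 40: the digits at positions 1, 10, 100, ..., 1_000_000 of
# 0.123456789101112... are 1, 1, 5, 3, 7, 2, 1, so solution() returns 210.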
| 77
|
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def snake_case_ ( self ):
pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, algorithm_type="deis", solver_order=order, solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_orders_and_types(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
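# Interchange sketch: the multistep schedulers above share a config schema, so
# a pipeline can hot-swap solvers from an existing config (names as imported at
# the top of this file):
# scheduler = DEISMultistepScheduler(num_train_timesteps=1000)
# scheduler = UniPCMultistepScheduler.from_config(scheduler.config)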
| 237
| 0
|
"""simple docstring"""
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """Return True if side lengths `nums` can form a closed polygon, i.e. the
    longest side is strictly shorter than the sum of all the others."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
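# e.g. check_polygon([3, 4, 5]) -> True, while check_polygon([1, 1, 3]) -> False
# because 1 + 1 < 3 leaves the "polygon" unable to close.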
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    """Pipeline for unconditional audio generation from a UNet and a scheduler."""

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, num_inference_steps: int = 100, generator=None, audio_length_in_s: Optional[float] = None, return_dict: bool = True):
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}.")

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process.")
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators.")

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
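# Usage sketch (checkpoint id assumed; any unconditional audio diffusion
# checkpoint with a matching UNet/scheduler pair should work):
# pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
# output = pipe(batch_size=1, num_inference_steps=100)
# waveform = output.audios[0]  # numpy array, shape (channels, samples)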
| 696
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_xlm_roberta': [
        'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'XLMRobertaConfig',
        'XLMRobertaOnnxConfig',
    ],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta'] = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta_fast'] = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlm_roberta'] = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlm_roberta'] = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xlm_roberta'] = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
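# _LazyModule defers the heavy framework imports declared above: attribute
# access on the module triggers the real submodule import on first use, so
# importing the package stays cheap even with torch/tf/flax variants present.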
| 67
|
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel('Iterations')
        plt.ylabel('Function values')
        plt.show()
    return best_state
if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
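# Editor's note (added): with the geometric cooling used above, the temperature
# after k outer-loop iterations is start_temperate * (1 - rate_of_decrease) ** k.
# With the defaults (start 100, rate 0.01, threshold 1), the search therefore runs
# for at most ceil(ln(1/100) / ln(0.99)), roughly 459 temperature steps, before it
# terminates even if a movable neighbor is always found.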
| 487
| 0
|
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Finds root from the `starting_point` onwards by Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
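# Editor's note (added): the `multiplicity` factor above implements the modified
# Newton step x_next = x - m * f(x) / f'(x). For a root of multiplicity m > 1,
# e.g. f(x) = (x - 1)**2 with m = 2, the plain step (m = 1) converges only
# linearly, while passing multiplicity=2 restores quadratic convergence.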
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 639
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")

    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
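# Editor's sanity sketch (added; the keys below are illustrative, not taken from
# a real checkpoint):
#   replace_key("vqvae.encoders.0.level_blocks.0.k")
#       -> "vqvae.encoders.0.level_blocks.0.codebook"
#   replace_key("prior.prime_state_ln.weight")
#       -> "prior.encoder.final_layer_norm.weight"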
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")

        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """
    Copy/paste/tweak the OpenAI weights into our Jukebox structure.
    """
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
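# Editor's usage sketch (added; assumes this script is saved as convert_jukebox.py,
# the file name is illustrative, both flags are defined by the argparse setup above):
#   python convert_jukebox.py --model_name jukebox-1b-lyrics \
#       --pytorch_dump_folder_path jukebox-1b-lyrics-converted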
| 639
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
UpperCamelCase = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
UpperCamelCase = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
        expected_boxes = UpperCamelCase  # the boxes list is the second assignment in the fmt: off block above
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 554
|
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 554
| 1
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,  # TP rank used when training with megatron
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
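# Editor's usage sketch (added; illustrative and not part of the original module.
# AutoTokenizer is an extra import, everything else is defined or imported above):
#
#   from transformers import AutoTokenizer
#
#   config = BloomConfig()  # 2 layers, 8 heads, hidden_size=64 by default here
#   onnx_config = BloomOnnxConfig(config)
#   tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8)
#   print(sorted(dummy.keys()))  # ['attention_mask', 'input_ids'] when use_past=False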
| 40
|
import os
def solution():
    """Returns the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
| 40
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64)
        scheduler = EulerDiscreteScheduler(beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading")
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)

        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs

    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
| 501
|
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 440
| 0
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")
class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
| 115
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
        - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
        - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
        - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
        - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
        - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
        - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
        - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
        - `'tf'`: Return TensorFlow `tf.constant` objects.
        - `'pt'`: Return PyTorch `torch.Tensor` objects.
        - `'np'`: Return Numpy `np.ndarray` objects.
    return_attention_mask (`bool`, *optional*):
        Whether or not to return the attention mask. If not set, will return the attention mask according to the
        specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles=None,
        texts=None,
        padding=False,
        truncation=False,
        max_length=None,
        return_tensors=None,
        return_attention_mask=None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input,
        reader_output,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits,
        end_logits,
        max_answer_length: int,
        top_spans: int,
    ):
        """Find the `top_spans` best, non-overlapping answer spans for one passage."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
| 115
| 1
|
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def get_config(self):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859, 0, 1611, 8, 5, 150, 26449, 2, 19, 348, 469, 3, 2595, 48, 20740, 246533, 246533, 19, 30, 5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 158
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of random PIL images to pass to the processor."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
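
    # Note: the random inputs above are channels-first arrays of shape (3, 30, 400);
    # np.moveaxis converts them to channels-last before building PIL images.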
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 158
| 1
|
def binary_or(a: int, b: int) -> str:
    """
    Return the bitwise OR of two non-negative integers as a binary string.

    >>> binary_or(25, 32)
    '0b111001'
    >>> binary_or(0, 0)
    '0b0'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
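
# Illustrative: binary_or(5, 3) returns "0b111", since 0b101 | 0b011 == 0b111.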
| 715
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/biogpt''': '''https://huggingface.co/microsoft/biogpt/resolve/main/config.json''',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
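
# A minimal usage sketch (illustrative): instantiating the default configuration
# defined above.
#
#     config = BioGptConfig()
#     assert config.hidden_size == 1024 and config.model_type == "biogpt"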
| 290
| 0
|
"""simple docstring"""
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """The heuristic here is the Manhattan distance to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
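
# Illustrative: calculate_heuristic above is the Manhattan distance, so a node at
# (pos_y, pos_x) = (0, 0) with goal (6, 6) gets f_cost = |0 - 6| + |0 - 6| = 12.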
class GreedyBestFirst:
    """Greedy best-first search: always expand the open node with the lowest f_cost."""

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

    for elem in grid:
        print(elem)
| 264
|
'''simple docstring'''
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we don't take this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
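
# Worked example of the bitmask bookkeeping above (illustrative): with 3 persons,
# final_mask = (1 << 3) - 1 == 0b111; assigning a task to person 1 updates the
# mask via mask | (1 << 1) -> 0b010, and the recursion stops once mask == 0b111.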
if __name__ == "__main__":
UpperCamelCase_ : Dict = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
UpperCamelCase_ : List[str] = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
| 331
| 0
|
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name
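
# Illustrative trace of rename_key:
#   "encoder.blocks.0.attn.proj.weight"
#   -> "blocks.0.attn.proj.weight"                                (strip "encoder.")
#   -> "videomae.encoder.layer.0.attn.proj.weight"                ("blocks" renamed)
#   -> "videomae.encoder.layer.0.attention.output.dense.weight"   ("attn.proj" renamed)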
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
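
# Note on the qkv split above: the original checkpoints store query/key/value as
# a single fused "qkv" weight of shape (3 * dim, dim); slicing val[:dim],
# val[dim : 2 * dim] and val[-dim:] recovers the three projection matrices.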
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits
    model_names = [
'videomae-small-finetuned-kinetics',
'videomae-small-finetuned-ssv2',
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
'videomae-base-short',
'videomae-base-short-finetuned-kinetics',
'videomae-base',
'videomae-base-finetuned-kinetics',
'videomae-large',
'videomae-large-finetuned-kinetics',
'videomae-huge-finetuned-kinetics',
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
'videomae-base-short-ssv2',
'videomae-base-short-finetuned-ssv2',
'videomae-base-ssv2',
'videomae-base-finetuned-ssv2',
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
A_ = torch.Size([1, 4_00] )
A_ = torch.tensor([-0.9_2_9_1, -0.4_0_6_1, -0.9_3_0_7] )
elif model_name == "videomae-small-finetuned-ssv2":
A_ = torch.Size([1, 1_74] )
A_ = torch.tensor([0.2_6_7_1, -0.4_6_8_9, -0.8_2_3_5] )
elif model_name == "videomae-base":
A_ = torch.Size([1, 14_08, 15_36] )
A_ = torch.tensor([[0.7_7_3_9, 0.7_9_6_8, 0.7_0_8_9], [0.6_7_0_1, 0.7_4_8_7, 0.6_2_0_9], [0.4_2_8_7, 0.5_1_5_8, 0.4_7_7_3]] )
elif model_name == "videomae-base-short":
A_ = torch.Size([1, 14_08, 15_36] )
A_ = torch.tensor([[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] )
# we verified the loss both for normalized and unnormalized targets for this one
A_ = torch.tensor([0.5_1_4_2] ) if config.norm_pix_loss else torch.tensor([0.6_4_6_9] )
elif model_name == "videomae-large":
A_ = torch.Size([1, 14_08, 15_36] )
A_ = torch.tensor([[0.7_1_4_9, 0.7_9_9_7, 0.6_9_6_6], [0.6_7_6_8, 0.7_8_6_9, 0.6_9_4_8], [0.5_1_3_9, 0.6_2_2_1, 0.5_6_0_5]] )
elif model_name == "videomae-large-finetuned-kinetics":
A_ = torch.Size([1, 4_00] )
A_ = torch.tensor([0.0_7_7_1, 0.0_0_1_1, -0.3_6_2_5] )
elif model_name == "videomae-huge-finetuned-kinetics":
A_ = torch.Size([1, 4_00] )
A_ = torch.tensor([0.2_4_3_3, 0.1_6_3_2, -0.4_8_9_4] )
elif model_name == "videomae-base-short-finetuned-kinetics":
A_ = torch.Size([1, 4_00] )
A_ = torch.tensor([0.6_5_8_8, 0.0_9_9_0, -0.2_4_9_3] )
elif model_name == "videomae-base-finetuned-kinetics":
A_ = torch.Size([1, 4_00] )
A_ = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] )
elif model_name == "videomae-base-short-ssv2":
A_ = torch.Size([1, 14_08, 15_36] )
A_ = torch.tensor([[0.4_7_1_2, 0.5_2_9_6, 0.5_7_8_6], [0.2_2_7_8, 0.2_7_2_9, 0.4_0_2_6], [0.0_3_5_2, 0.0_7_3_0, 0.2_5_0_6]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
A_ = torch.Size([1, 1_74] )
A_ = torch.tensor([-0.0_5_3_7, -0.1_5_3_9, -0.3_2_6_6] )
elif model_name == "videomae-base-ssv2":
A_ = torch.Size([1, 14_08, 15_36] )
A_ = torch.tensor([[0.8_1_3_1, 0.8_7_2_7, 0.8_5_4_6], [0.7_3_6_6, 0.9_3_7_7, 0.8_8_7_0], [0.5_9_3_5, 0.8_8_7_4, 0.8_5_6_4]] )
elif model_name == "videomae-base-finetuned-ssv2":
A_ = torch.Size([1, 1_74] )
A_ = torch.tensor([0.1_9_6_1, -0.8_3_3_7, -0.6_3_8_9] )
else:
raise ValueError(F'''Model name not supported. Should be one of {model_names}''' )
    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 704
|
'''simple docstring'''
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
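
# Illustrative: applying the "init" pattern with VERSION -> "0.19.0" rewrites
#   __version__ = "0.19.0.dev0"
# to
#   __version__ = "0.19.0"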
def update_version_in_file(fname: str, version: str, pattern: str):
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version: str):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version: str, patch: bool = False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace links to main docs with stable docs in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch: bool = False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 174
| 0
|
import datasets
from .evaluate import evaluate
_CITATION = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_KWARGS_DESCRIPTION = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 504
|
def solution(n: int = 4_000_000) -> int:
    """Return the sum of all even Fibonacci numbers that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
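
# Note (illustrative): even Fibonacci numbers are every third term (2, 8, 34, 144, ...),
# so they also satisfy E(k) = 4 * E(k - 1) + E(k - 2); the simple parity filter
# above is kept for clarity.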
if __name__ == "__main__":
print(f'''{solution() = }''')
| 504
| 1
|
'''simple docstring'''
def mf_knapsack(i, wt, val, j):
    """
    Memoization (memory-function) variant: only the subproblems that are
    actually needed are solved, using the global table `f` (-1 = unsolved).
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp
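
# Illustrative use of the recurrence above: with wt = [1, 2], val = [10, 15] and
# capacity w = 2, dp[2][2] = max(15 + dp[1][0], dp[1][2]) = max(15, 10) = 15.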
def knapsack_with_example_solution(w: int, wt: list, val: list):
    """
    Solves the integer-weights knapsack problem and returns one of
    the several possible optimal subsets.
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError("Both the weights and values vectors must be either lists or tuples")

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    """
    Recursively reconstructs one of the optimal subsets given a filled
    DP table and the vector of weights.
    """
    # item i is part of an optimal subset at weight j exactly when the optimal
    # value at (i, j) differs from the optimal value at (i - 1, j).
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
| 704
|
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
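
# A minimal usage sketch (illustrative):
#
#     config = RetriBertConfig()
#     assert config.projection_dim == 128 and config.model_type == "retribert"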
| 343
| 0
|
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization. "
                "The training dataset will be truncated in block of this size for training. "
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset ( args: DataTrainingArguments , tokenizer: PreTrainedTokenizer , evaluate: bool = False , cache_dir: Optional[str] = None , ):
def _dataset(file_path , ref_path=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("""You need to set whole word masking and mlm to True for Chinese Whole Word Mask""" )
return LineByLineWithRefDataset(
tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , ref_path=ref_path , )
return LineByLineTextDataset(tokenizer=tokenizer , file_path=file_path , block_size=args.block_size )
else:
return TextDataset(
tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=cache_dir , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(f ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def main ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , UpperCamelCase__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"""
""" script, save it,and load it from here, using --tokenizer_name""" )
if model_args.model_name_or_path:
model = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
else:
logger.info("""Training new model from scratch""" )
model = AutoModelWithLMHead.from_config(config )
model.resize_token_embeddings(len(tokenizer ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"""BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"""
"""--mlm flag (masked language modeling).""" )
if data_args.block_size <= 0:
data_args.block_size = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
data_args.block_size = min(data_args.block_size , tokenizer.max_len )
# Get datasets
train_dataset = (
get_dataset(data_args , tokenizer=tokenizer , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
eval_dataset = (
get_dataset(data_args , tokenizer=tokenizer , evaluate=True , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
data_collator = DataCollatorForPermutationLanguageModeling(
tokenizer=tokenizer , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
data_collator = DataCollatorForWholeWordMask(
tokenizer=tokenizer , mlm_probability=data_args.mlm_probability )
else:
data_collator = DataCollatorForLanguageModeling(
tokenizer=tokenizer , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
trainer = Trainer(
model=model , args=training_args , data_collator=data_collator , train_dataset=train_dataset , eval_dataset=eval_dataset , prediction_loss_only=True , )
# Training
if training_args.do_train:
model_path = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=model_path )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
results = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
eval_output = trainer.evaluate()
perplexity = math.exp(eval_output["""eval_loss"""] )
result = {"""perplexity""": perplexity}
output_eval_file = os.path.join(training_args.output_dir , """eval_results_lm.txt""" )
if trainer.is_world_master():
with open(output_eval_file , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""" , key , str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
results.update(result )
return results
def _mp_fn ( index ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
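# Illustrative invocation (a sketch; the script file name, model and paths are
# hypothetical, but the flags map to the dataclasses defined above):
#   python run_language_modeling.py \
#       --model_name_or_path gpt2 \
#       --train_data_file data/train.txt \
#       --do_train \
#       --output_dir /tmp/lm-output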
| 222
|
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ ( __lowercase):
"""simple docstring"""
def __init__( self: Tuple , __a: Tuple , __a: Tuple=13 , __a: Tuple=7 , __a: List[str]=True , __a: int=True , __a: Optional[int]=True , __a: Union[str, Any]=True , __a: int=99 , __a: int=32 , __a: List[Any]=5 , __a: Union[str, Any]=4 , __a: Optional[int]=37 , __a: Dict="gelu" , __a: Dict=0.1 , __a: Any=0.1 , __a: str=512 , __a: int=16 , __a: Any=2 , __a: int=0.02 , __a: List[str]=False , __a: Optional[int]=True , __a: List[Any]="None" , __a: List[Any]=3 , __a: Optional[int]=4 , __a: str=None , )-> str:
lowerCamelCase : Union[str, Any] = parent
lowerCamelCase : Any = batch_size
lowerCamelCase : Any = seq_length
lowerCamelCase : Optional[Any] = is_training
lowerCamelCase : List[str] = use_input_mask
lowerCamelCase : Optional[Any] = use_token_type_ids
lowerCamelCase : List[str] = use_labels
lowerCamelCase : Tuple = vocab_size
lowerCamelCase : Union[str, Any] = hidden_size
lowerCamelCase : int = num_hidden_layers
lowerCamelCase : Dict = num_attention_heads
lowerCamelCase : Tuple = intermediate_size
lowerCamelCase : Optional[int] = hidden_act
lowerCamelCase : List[str] = hidden_dropout_prob
lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob
lowerCamelCase : int = max_position_embeddings
lowerCamelCase : str = type_vocab_size
lowerCamelCase : int = type_sequence_label_size
lowerCamelCase : Optional[Any] = initializer_range
lowerCamelCase : Tuple = num_labels
lowerCamelCase : int = num_choices
lowerCamelCase : int = relative_attention
lowerCamelCase : Union[str, Any] = position_biased_input
lowerCamelCase : Optional[int] = pos_att_type
lowerCamelCase : Optional[int] = scope
def a__ ( self: Union[str, Any] )-> List[str]:
lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase : Optional[int] = None
if self.use_input_mask:
lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
lowerCamelCase : Tuple = None
if self.use_token_type_ids:
lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase : Union[str, Any] = None
lowerCamelCase : List[Any] = None
lowerCamelCase : str = None
if self.use_labels:
lowerCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self: Tuple )-> Optional[int]:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def a__ ( self: int , __a: int )-> List[Any]:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def a__ ( self: Tuple , __a: Union[str, Any] , __a: int , __a: Dict , __a: List[str] , __a: Tuple , __a: Any , __a: Optional[int] )-> str:
lowerCamelCase : Optional[int] = DebertaVaModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Dict = model(__a , attention_mask=__a , token_type_ids=__a )[0]
lowerCamelCase : Optional[Any] = model(__a , token_type_ids=__a )[0]
lowerCamelCase : Union[str, Any] = model(__a )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def a__ ( self: Union[str, Any] , __a: Any , __a: Optional[Any] , __a: Any , __a: Optional[Any] , __a: Tuple , __a: int , __a: List[str] )-> str:
lowerCamelCase : int = DebertaVaForMaskedLM(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self: List[Any] , __a: List[Any] , __a: Any , __a: Tuple , __a: int , __a: Optional[Any] , __a: str , __a: List[Any] )-> List[str]:
lowerCamelCase : List[str] = self.num_labels
lowerCamelCase : Optional[int] = DebertaVaForSequenceClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Union[str, Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__a )
def a__ ( self: List[str] , __a: List[str] , __a: Optional[int] , __a: Optional[Any] , __a: Dict , __a: Union[str, Any] , __a: List[str] , __a: Optional[Any] )-> List[Any]:
lowerCamelCase : Optional[int] = self.num_labels
lowerCamelCase : Any = DebertaVaForTokenClassification(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[str] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self: Optional[int] , __a: Dict , __a: int , __a: Tuple , __a: Optional[Any] , __a: List[Any] , __a: List[Any] , __a: Union[str, Any] )-> Optional[Any]:
lowerCamelCase : Optional[Any] = DebertaVaForQuestionAnswering(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(
__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self: Any , __a: int , __a: List[str] , __a: str , __a: int , __a: str , __a: Optional[Any] , __a: List[str] )-> Tuple:
lowerCamelCase : Tuple = DebertaVaForMultipleChoice(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase : str = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase : Union[str, Any] = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self: Optional[int] )-> Optional[int]:
lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = config_and_inputs
lowerCamelCase : Any = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Union[str, Any] =(
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
snake_case__ : int =(
{
'''feature-extraction''': DebertaVaModel,
'''fill-mask''': DebertaVaForMaskedLM,
'''question-answering''': DebertaVaForQuestionAnswering,
'''text-classification''': DebertaVaForSequenceClassification,
'''token-classification''': DebertaVaForTokenClassification,
'''zero-shot''': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : List[str] =True
snake_case__ : Optional[int] =False
snake_case__ : Dict =False
snake_case__ : Union[str, Any] =False
snake_case__ : List[str] =False
def a__ ( self: Dict )-> Union[str, Any]:
lowerCamelCase : List[str] = DebertaVaModelTester(self )
lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__a , hidden_size=37 )
def a__ ( self: Dict )-> List[str]:
self.config_tester.run_common_tests()
def a__ ( self: int )-> Any:
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__a )
def a__ ( self: List[str] )-> List[Any]:
lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__a )
def a__ ( self: Optional[int] )-> List[str]:
lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__a )
def a__ ( self: Optional[int] )-> Union[str, Any]:
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__a )
def a__ ( self: Optional[int] )-> Dict:
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__a )
def a__ ( self: Any )-> Any:
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*__a )
@slow
def a__ ( self: Any )-> int:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : Dict = DebertaVaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase):
"""simple docstring"""
@unittest.skip(reason="""Model not available yet""" )
def a__ ( self: Union[str, Any] )-> Union[str, Any]:
pass
@slow
def a__ ( self: Tuple )-> Dict:
lowerCamelCase : Dict = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
lowerCamelCase : Tuple = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
lowerCamelCase : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCamelCase : Union[str, Any] = model(__a , attention_mask=__a )[0]
# compare the actual values for a slice.
lowerCamelCase : Tuple = torch.tensor(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1e-4 ) , f'{output[:, 1:4, 1:4]}' )
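# Pattern note (a sketch of why the test is written this way): integration tests
# pin a tiny logits slice and compare with torch.allclose at atol=1e-4, which
# tolerates kernel-level nondeterminism while still catching weight or
# architecture regressions.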
| 222
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_UpperCAmelCase : List[Any] = {"""tokenization_herbert""": ["""HerbertTokenizer"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[Any] = ["""HerbertTokenizerFast"""]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
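# Usage sketch (an assumption about the standard lazy-module behaviour; the
# import path is hypothetical): the tokenizer class is only materialised on
# first attribute access, e.g.
#   from transformers.models.herbert import HerbertTokenizer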
| 108
|
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=True ):
'''simple docstring'''
model.train()
snake_case_ = model(UpperCamelCase__ )
snake_case_ = F.mse_loss(UpperCamelCase__ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(UpperCamelCase__ )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__=False ):
'''simple docstring'''
set_seed(42 )
snake_case_ = RegressionModel()
snake_case_ = deepcopy(UpperCamelCase__ )
snake_case_ = RegressionDataset(length=80 )
snake_case_ = DataLoader(UpperCamelCase__ , batch_size=16 )
model.to(accelerator.device )
if sched:
snake_case_ = AdamW(params=model.parameters() , lr=1E-3 )
snake_case_ = AdamW(params=ddp_model.parameters() , lr=1E-3 )
snake_case_ = LambdaLR(UpperCamelCase__ , lr_lambda=lambda epoch : epoch**0.65 )
snake_case_ = LambdaLR(UpperCamelCase__ , lr_lambda=lambda epoch : epoch**0.65 )
# Make a copy of `model`
if sched:
snake_case_ , snake_case_ , snake_case_ , snake_case_ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
snake_case_ , snake_case_ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def __lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
snake_case_ , snake_case_ , snake_case_ = get_training_setup(UpperCamelCase__ )
# Use a single batch
snake_case_ , snake_case_ = next(iter(UpperCamelCase__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
snake_case_ , snake_case_ = accelerator.gather((ddp_input, ddp_target) )
snake_case_ , snake_case_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(UpperCamelCase__ ):
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
# Sync grads
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
snake_case_ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )]
def __lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
snake_case_ , snake_case_ , snake_case_ = get_training_setup(UpperCamelCase__ )
# Use a single batch
snake_case_ , snake_case_ = next(iter(UpperCamelCase__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
snake_case_ , snake_case_ = accelerator.gather((ddp_input, ddp_target) )
snake_case_ , snake_case_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(UpperCamelCase__ ):
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
# Sync grads
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
snake_case_ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )]
def __lowerCamelCase ( UpperCamelCase__=False , UpperCamelCase__=False ):
'''simple docstring'''
snake_case_ = Accelerator(
split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
snake_case_ , snake_case_ , snake_case_ = get_training_setup(UpperCamelCase__ )
for iteration, batch in enumerate(UpperCamelCase__ ):
snake_case_ , snake_case_ = batch.values()
# Gather the distributed inputs and targs for the base model
snake_case_ , snake_case_ = accelerator.gather((ddp_input, ddp_target) )
snake_case_ , snake_case_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(UpperCamelCase__ ):
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(UpperCamelCase__ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
snake_case_ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )]
GradientState._reset_state()
def __lowerCamelCase ( UpperCamelCase__=False , UpperCamelCase__=False ):
'''simple docstring'''
snake_case_ = Accelerator(
split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = get_training_setup(UpperCamelCase__ , UpperCamelCase__ )
for iteration, batch in enumerate(UpperCamelCase__ ):
snake_case_ , snake_case_ = batch.values()
# Gather the distributed inputs and targs for the base model
snake_case_ , snake_case_ = accelerator.gather((ddp_input, ddp_target) )
snake_case_ , snake_case_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(UpperCamelCase__ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(UpperCamelCase__ ):
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
snake_case_ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(UpperCamelCase__ ))
if accelerator.num_processes > 1:
check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def __lowerCamelCase ( ):
'''simple docstring'''
snake_case_ = Accelerator()
snake_case_ = RegressionDataset(length=80 )
snake_case_ = DataLoader(UpperCamelCase__ , batch_size=16 )
snake_case_ = RegressionDataset(length=96 )
snake_case_ = DataLoader(UpperCamelCase__ , batch_size=16 )
snake_case_ , snake_case_ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(UpperCamelCase__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ )
if iteration < len(UpperCamelCase__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(UpperCamelCase__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ )
if batch_num < len(UpperCamelCase__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def __lowerCamelCase ( ):
'''simple docstring'''
snake_case_ = Accelerator()
snake_case_ = accelerator.state
if state.local_process_index == 0:
print('**Test `accumulate` gradient accumulation with dataloader break**' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('**Test NOOP `no_sync` context manager**' )
test_noop_sync(UpperCamelCase__ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('**Test Distributed `no_sync` context manager**' )
test_distributed_sync(UpperCamelCase__ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation, ' , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(UpperCamelCase__ , UpperCamelCase__ )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version('<' , '2.0' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , '`split_batches=False`, `dispatch_batches=False`**' , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(UpperCamelCase__ , UpperCamelCase__ )
def __lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
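# Launch sketch (the file name is hypothetical): the script is meant to be run
# through the Accelerate launcher so that the multi-process branches execute:
#   accelerate launch test_sync.py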
| 108
| 1
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def UpperCamelCase ( ode_func : Callable , ya : float , xa : float , step_size : float , x_end : float ):
n = int(np.ceil((x_end - xa) / step_size ) )
y = np.zeros((n + 1,) )
y[0] = ya
x = xa
for k in range(n ):
y[k + 1] = y[k] + step_size * ode_func(x , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
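# Worked example (a sketch using the repaired function above): integrate
# y' = y with y(0) = 1 up to x = 1 with step 0.01; explicit Euler gives
# (1.01)**100 ~ 2.7048, an under-estimate of e ~ 2.7183.
#   ys = UpperCamelCase(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
#   print(ys[-1])  # ~2.7048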
| 440
|
'''simple docstring'''
from __future__ import annotations
import math
def minimax ( depth : int , node_index : int , is_max : bool , scores : list[int] , height : float ):
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if len(scores ) == 0:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
return min(
minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
def main ( ):
scores = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
height = math.log(len(scores ) , 2 )
print("Optimal value : " , end="" )
print(minimax(0 , 0 , True , scores , height ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
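# Worked check (a sketch): for scores [3, 5, 2, 9, 12, 5, 23, 23] and height 3,
# the depth-2 maxima are 5, 9, 12, 23; the depth-1 minima keep 5 and 12; the
# root maximiser takes 12.
#   minimax(0, 0, True, [3, 5, 2, 9, 12, 5, 23, 23], 3)  # -> 12
# The main() above prints 65 for its own score list.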
| 440
| 1
|
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT : float = 8.31_4462 # Unit - J mol-1 K-1
def pressure_of_gas_system ( moles : float , kelvin : float , volume : float ) ->float:
'''simple docstring'''
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system ( moles : float , kelvin : float , pressure : float ) ->float:
'''simple docstring'''
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
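# Worked example (a sketch of the first formula above, P = nRT / V):
#   1 mol at 300 K confined to 0.0224 m^3 exerts
#   pressure_of_gas_system(1, 300, 0.0224)  # ~111354.4 Pa, about 1.1 atm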
| 31
|
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
def __a ( self , lowerCAmelCase__ ) -> Optional[int]:
a : str = 3
a : str = 250
a : List[Any] = ids_tensor((batch_size, length) , lowerCAmelCase__ )
a : Optional[Any] = torch.ones((batch_size, length) , device=lowerCAmelCase__ , dtype=torch.float ) / length
return input_ids, scores
def __a ( self ) -> List[Any]:
a, a : str = self._get_tensors(5 )
a : Any = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a, a : str = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a, a : Union[str, Any] = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __a ( self ) -> List[Any]:
a : Optional[Any] = MaxLengthCriteria(max_length=10 )
a, a : int = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a, a : int = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a, a : Union[str, Any] = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __a ( self ) -> List[str]:
a : Tuple = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
a, a : str = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a, a : int = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a, a : int = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a : List[Any] = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def __a ( self ) -> str:
a, a : Tuple = self._get_tensors(5 )
a : str = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a : Optional[int] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __a ( self ) -> str:
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(lowerCAmelCase__ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
a : Optional[int] = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(lowerCAmelCase__ ) , 1 )
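# Standalone usage sketch (mirrors what the tests above exercise; tensor names
# are illustrative): a criterion is a callable over (input_ids, scores) that
# returns True once generation should stop.
#   criteria = MaxLengthCriteria(max_length=10)
#   criteria(input_ids, scores)  # True once input_ids.shape[-1] >= 10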
| 31
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __snake_case ( PretrainedConfig ):
model_type = '''decision_transformer'''
keys_to_ignore_at_inference = ['''past_key_values''']
attribute_map = {
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self, state_dim=17, act_dim=4, hidden_size=128, max_ep_len=4096, action_tanh=True, vocab_size=1, n_positions=1024, n_layer=3, n_head=1, n_inner=None, activation_function="relu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=5_0256, eos_token_id=5_0256, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs, ):
"""simple docstring"""
self.state_dim = state_dim
self.act_dim = act_dim
self.hidden_size = hidden_size
self.max_ep_len = max_ep_len
self.action_tanh = action_tanh
self.vocab_size = vocab_size
self.n_positions = n_positions
self.n_layer = n_layer
self.n_head = n_head
self.n_inner = n_inner
self.activation_function = activation_function
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.scale_attn_weights = scale_attn_weights
self.use_cache = use_cache
self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
self.reorder_and_upcast_attn = reorder_and_upcast_attn
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
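# Minimal usage sketch (an assumption; `__snake_case` is this row's name for
# what is upstream DecisionTransformerConfig):
#   config = __snake_case()  # state_dim=17, act_dim=4, hidden_size=128 by default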
| 320
|
"""simple docstring"""
import operator
def strand_sort ( arr ,reverse = False ,solution = None ):
"""simple docstring"""
_operator = operator.lt if reverse else operator.gt
solution = solution or []
if not arr:
return solution
sublist = [arr.pop(0 )]
for i, item in enumerate(arr ):
if _operator(item ,sublist[-1] ):
sublist.append(item )
arr.pop(i )
# merging sublist into solution list
if not solution:
solution.extend(sublist )
else:
while sublist:
ind = sublist.pop(0 )
for i, xx in enumerate(solution ):
if not _operator(xx ,ind ):
solution.insert(i ,ind )
break
else:
solution.append(ind )
strand_sort(arr ,reverse ,solution )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
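# Additional check (a sketch): the sort also handles duplicates and mixed order,
# e.g. strand_sort([10, 5, 30, 30, 2])  # -> [2, 5, 10, 30, 30]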
| 34
| 0
|
'''simple docstring'''
import os
from collections.abc import Iterator
def __lowerCAmelCase (__lowerCAmelCase = "." ):
for dir_path, dir_names, filenames in os.walk(__lowerCAmelCase ):
_UpperCAmelCase : List[Any] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(__lowerCAmelCase )[1] in (".py", ".ipynb"):
yield os.path.join(__lowerCAmelCase , __lowerCAmelCase ).lstrip("./" )
def __lowerCAmelCase (__lowerCAmelCase ):
return F"""{i * ' '}*""" if i else "\n##"
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : List[str] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(__lowerCAmelCase ) or old_parts[i] != new_part) and new_part:
print(F"""{md_prefix(__lowerCAmelCase )} {new_part.replace('_' , ' ' ).title()}""" )
return new_path
def __lowerCAmelCase (__lowerCAmelCase = "." ):
_UpperCAmelCase : Tuple = ""
for filepath in sorted(good_file_paths(__lowerCAmelCase ) ):
_UpperCAmelCase : List[Any] = os.path.split(__lowerCAmelCase )
if filepath != old_path:
_UpperCAmelCase : Union[str, Any] = print_path(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = (filepath.count(os.sep ) + 1) if filepath else 0
_UpperCAmelCase : Tuple = F"""{filepath}/{filename}""".replace(" " , "%20" )
_UpperCAmelCase : Any = os.path.splitext(filename.replace("_" , " " ).title() )[0]
print(F"""{md_prefix(__lowerCAmelCase )} [{filename}]({url})""" )
if __name__ == "__main__":
print_directory_md('.')
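# Example output shape (a sketch): for a repo containing sorts/bubble_sort.py
# the script prints a markdown index such as
#
#   ## Sorts
#    * [Bubble Sort](sorts/bubble_sort.py)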
| 700
|
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url (repo_id: str , path: str , revision: Optional[str] = None ) -> str:
if version.parse(hfh.__version__ ).release < version.parse("0.11.0" ).release:
# old versions of hfh don't url-encode the file path
path = quote(path )
return hfh.hf_hub_url(repo_id , path , repo_type="dataset" , revision=revision )
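# Usage sketch (the dataset id and file name are hypothetical):
#   hf_hub_url("squad", "README.md")
#   # -> "https://huggingface.co/datasets/squad/resolve/main/README.md"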
| 40
| 0
|
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
a__ : Optional[int] = logging.get_logger(__name__)
class ChineseCLIPFeatureExtractor( ChineseCLIPImageProcessor ):
'''simple docstring'''
def __init__( self : str , *args , **kwargs) -> None:
"""simple docstring"""
warnings.warn(
'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ChineseCLIPImageProcessor instead.' , FutureWarning , )
super().__init__(*args , **kwargs)
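# Migration sketch (the replacement class is the one named in the warning):
#   from transformers import ChineseCLIPImageProcessor
#   image_processor = ChineseCLIPImageProcessor()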
| 622
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"albert-base-v1": 5_12,
"albert-large-v1": 5_12,
"albert-xlarge-v1": 5_12,
"albert-xxlarge-v1": 5_12,
"albert-base-v2": 5_12,
"albert-large-v2": 5_12,
"albert-xlarge-v2": 5_12,
"albert-xxlarge-v2": 5_12,
}
a__ : Optional[int] = "▁"
class UpperCAmelCase__( PreTrainedTokenizer ):
'''simple docstring'''
A : Tuple = VOCAB_FILES_NAMES
A : Dict = PRETRAINED_VOCAB_FILES_MAP
A : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : List[str] , vocab_file , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
"""simple docstring"""
mask_token = (
AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False)
if isinstance(mask_token , str)
else mask_token
)
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
self.do_lower_case = do_lower_case
self.remove_space = remove_space
self.keep_accents = keep_accents
self.vocab_file = vocab_file
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(vocab_file)
@property
def UpperCAmelCase ( self : Any) -> Optional[int]:
"""simple docstring"""
return len(self.sp_model)
def UpperCAmelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : int) -> List[str]:
"""simple docstring"""
state = self.__dict__.copy()
state['sp_model'] = None
return state
def __setstate__( self : int , lowerCAmelCase : str) -> Any:
"""simple docstring"""
self.__dict__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def UpperCAmelCase ( self : Dict , inputs: str) -> str:
"""simple docstring"""
if self.remove_space:
outputs = ' '.join(inputs.strip().split())
else:
outputs = inputs
outputs = outputs.replace('``' , '"').replace('\'\'' , '"')
if not self.keep_accents:
outputs = unicodedata.normalize('NFKD' , outputs)
outputs = ''.join([c for c in outputs if not unicodedata.combining(c)])
if self.do_lower_case:
outputs = outputs.lower()
return outputs
def UpperCAmelCase ( self : Any , text: str) -> List[str]:
"""simple docstring"""
text = self.preprocess_text(text)
pieces = self.sp_model.encode(text , out_type=str)
new_pieces = []
for piece in pieces:
if len(piece) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , ''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
cur_pieces = cur_pieces[1:]
else:
cur_pieces = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(cur_pieces)
else:
new_pieces.append(piece)
return new_pieces
def UpperCAmelCase ( self : Any , token: str) -> int:
"""simple docstring"""
return self.sp_model.PieceToId(token)
def UpperCAmelCase ( self : List[str] , index: int) -> str:
"""simple docstring"""
return self.sp_model.IdToPiece(index)
def UpperCAmelCase ( self : Tuple , tokens: List[str]) -> str:
"""simple docstring"""
current_sub_tokens = []
out_string = ''
prev_is_special = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(current_sub_tokens) + token
prev_is_special = True
current_sub_tokens = []
else:
current_sub_tokens.append(token)
prev_is_special = False
out_string += self.sp_model.decode(current_sub_tokens)
return out_string.strip()
def UpperCAmelCase ( self : Union[str, Any] , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return cls + token_ids_0 + sep
return cls + token_ids_0 + sep + token_ids_1 + sep
def UpperCAmelCase ( self : int , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True)
if token_ids_1 is not None:
return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1]
def UpperCAmelCase ( self : Union[str, Any] , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
def UpperCAmelCase ( self : Dict , save_directory: str , filename_prefix: Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(save_directory):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , out_vocab_file)
elif not os.path.isfile(self.vocab_file):
with open(out_vocab_file , 'wb') as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
return (out_vocab_file,)
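# Usage sketch (an assumption: the class follows the standard slow-tokenizer
# API and needs the sentencepiece model from the vocab map above):
#   tok = UpperCAmelCase__.from_pretrained("albert-base-v2")
#   tok.tokenize("Hello world")  # lower-cased sentencepiece pieces by default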
| 622
| 1
|
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time ( _A: int ) -> str:
'''simple docstring'''
t = int(_A )
h , m , s = t // 3600, (t // 60) % 60, t % 60
return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
def html_progress_bar ( value , total , prefix , label , width=300 ) -> str:
'''simple docstring'''
return f"\n    <div>\n      {prefix}\n      <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>\n      {label}\n    </div>\n    "
def text_to_html_table ( items ) -> str:
'''simple docstring'''
html_code = """<table border=\"1\" class=\"dataframe\">\n"""
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f" <th>{i}</th>\n"
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
lowerCAmelCase = f"{elt:.6f}" if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else str(lowerCAmelCase__ )
html_code += f" <td>{elt}</td>\n"
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
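# e.g. (sketch): format_time(75) -> "01:15", format_time(3661) -> "1:01:01";
# text_to_html_table([["Step", "Loss"], [10, 0.5]]) renders a two-row HTML table.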
class a__:
'''simple docstring'''
UpperCAmelCase_ : Any = 5
UpperCAmelCase_ : List[str] = 0.2
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = 300 , ):
"""simple docstring"""
lowerCAmelCase = total
lowerCAmelCase = """""" if prefix is None else prefix
lowerCAmelCase = leave
lowerCAmelCase = parent
lowerCAmelCase = width
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase = False , __lowerCAmelCase = None):
"""simple docstring"""
lowerCAmelCase = value
if comment is not None:
lowerCAmelCase = comment
if self.last_value is None:
lowerCAmelCase = lowerCAmelCase = time.time()
lowerCAmelCase = lowerCAmelCase = value
lowerCAmelCase = lowerCAmelCase = None
lowerCAmelCase = self.warmup
lowerCAmelCase = 1
self.update_bar(_a)
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total):
if self.first_calls > 0:
self.first_calls -= 1
lowerCAmelCase = time.time()
lowerCAmelCase = current_time - self.start_time
# We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
lowerCAmelCase = self.elapsed_time / (value - self.start_value)
else:
lowerCAmelCase = None
if value >= self.total:
lowerCAmelCase = self.total
lowerCAmelCase = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
lowerCAmelCase = self.average_time_per_item * (self.total - value)
self.update_bar(_a)
lowerCAmelCase = value
lowerCAmelCase = current_time
if self.average_time_per_item is None:
lowerCAmelCase = 1
else:
lowerCAmelCase = max(int(self.update_every / self.average_time_per_item) , 1)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=None):
"""simple docstring"""
lowerCAmelCase = """ """ * (len(str(self.total)) - len(str(_a))) + str(_a)
if self.elapsed_time is None:
lowerCAmelCase = f"[{spaced_value}/{self.total} : < :"
elif self.predicted_remaining is None:
lowerCAmelCase = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
else:
lowerCAmelCase = (
f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
f" {format_time(self.predicted_remaining)}"
)
self.label += f", {1/self.average_time_per_item:.2f} it/s"
self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
self.display()
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width)
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
lowerCAmelCase = disp.display(disp.HTML(self.html_code) , display_id=_a)
else:
self.output.update(disp.HTML(self.html_code))
def a_ ( self):
"""simple docstring"""
if self.parent is None and self.output is not None:
self.output.update(disp.HTML(""""""))
class NotebookTrainingTracker(NotebookProgressBar):
    """A progress bar that also reports metrics in an HTML table below the bar."""

    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    """A callback that displays the progress of training or evaluation in a Jupyter notebook."""

    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break
            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
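
# A minimal usage sketch (added for illustration, not part of the original
# file): the bar above is driven by monotonically increasing calls to
# `update()` and throttles its own redraws via `wait_for`. It assumes a
# notebook environment so that `disp.display` can render HTML.
def _demo_progress_bar(total: int = 100) -> None:
    bar = NotebookProgressBar(total, prefix="Demo")
    for step in range(1, total + 1):
        time.sleep(0.01)  # stand-in for real work
        bar.update(step, comment=f"step {step}")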
| 721
|
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_aa: bytes) -> bytes:
    """Converts the 32-char bit string of a word to little-endian byte order."""
    if len(string_aa) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """Converts a non-negative int to little-endian hexadecimal bytes."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Converts the message to a bit string padded to a multiple of 512 chars,
    with the original length appended in little-endian order."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Splits the bit string into 512-char blocks and yields each block as a list of 16 words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_aa(i: int) -> int:
    """Returns the bitwise NOT of a 32-bit integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_aa(a: int, b: int) -> int:
    """Adds two numbers modulo 2**32."""
    return (a + b) % 2**32
def left_rotate_aa(i: int, shift: int) -> int:
    """Rotates the bits of a 32-bit integer left by the given amount."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Returns the 32-char MD5 hex digest of the message as bytes."""
    bit_string = preprocess(message)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]
    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0
        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)     # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)     # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))
        # Add hashed chunk to running total
        a0 = sum_aa(a0, a)
        b0 = sum_aa(b0, b)
        c0 = sum_aa(c0, c)
        d0 = sum_aa(d0, d)
    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
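    # Illustrative cross-check (not in the original file): the digest above
    # should agree with the standard library's MD5 for the same input.
    import hashlib

    assert md5_me(b"hello") == hashlib.md5(b"hello").hexdigest().encode("utf-8")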
| 605
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-5,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, max_2d_position_embeddings=1024, coordinate_size=128,
        shape_size=128, has_relative_attention_bias=True, rel_pos_bins=32, max_rel_pos=128, rel_2d_pos_bins=64,
        max_rel_2d_pos=256, has_spatial_attention_bias=True, text_embed=True, visual_embed=True, input_size=224,
        num_channels=3, patch_size=16, classifier_dropout=None, **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size, hidden_size=hidden_size, num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob, attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings, type_vocab_size=type_vocab_size,
            initializer_range=initializer_range, layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id,
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
    def atol_for_validation(self) -> float:
return 1E-5
@property
    def default_onnx_opset(self) -> int:
return 12
    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # A dummy image is used, so OCR must not be applied
        setattr(processor.image_processor, "apply_ocr", False)
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(dummy_image, text=dummy_text, boxes=dummy_bboxes, return_tensors=framework)
        )
        return inputs
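
# Illustrative sketch (added for this edit; the checkpoint name is only an
# example, and running this downloads weights): exercising the dummy-input
# generator defined above.
if __name__ == "__main__":
    from transformers import AutoProcessor

    processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
    onnx_config = LayoutLMv3OnnxConfig(LayoutLMv3Config(), task="question-answering")
    dummy_inputs = onnx_config.generate_dummy_inputs(processor)
    print(sorted(dummy_inputs.keys()))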
| 335
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Tuple = ['''image_processor''', '''tokenizer''']
UpperCAmelCase__ : Any = '''OwlViTImageProcessor'''
UpperCAmelCase__ : Optional[Any] = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self :Any ,__snake_case :Tuple=None ,__snake_case :Optional[int]=None ,**__snake_case :Dict ) -> Tuple:
a__ = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' ,__snake_case ,)
a__ = kwargs.pop('feature_extractor' )
a__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__snake_case ,__snake_case )
def __call__( self :str ,__snake_case :List[str]=None ,__snake_case :Optional[Any]=None ,__snake_case :List[str]=None ,__snake_case :Union[str, Any]="max_length" ,__snake_case :int="np" ,**__snake_case :List[str] ) -> List[str]:
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(__snake_case ,__snake_case ) or (isinstance(__snake_case ,__snake_case ) and not isinstance(text[0] ,__snake_case )):
a__ = [self.tokenizer(__snake_case ,padding=__snake_case ,return_tensors=__snake_case ,**__snake_case )]
elif isinstance(__snake_case ,__snake_case ) and isinstance(text[0] ,__snake_case ):
a__ = []
# Maximum number of queries across batch
a__ = max([len(__snake_case ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__snake_case ) != max_num_queries:
a__ = t + [' '] * (max_num_queries - len(__snake_case ))
a__ = self.tokenizer(__snake_case ,padding=__snake_case ,return_tensors=__snake_case ,**__snake_case )
encodings.append(__snake_case )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
a__ = np.concatenate([encoding['input_ids'] for encoding in encodings] ,axis=0 )
a__ = np.concatenate([encoding['attention_mask'] for encoding in encodings] ,axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
a__ = jnp.concatenate([encoding['input_ids'] for encoding in encodings] ,axis=0 )
a__ = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] ,axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
a__ = torch.cat([encoding['input_ids'] for encoding in encodings] ,dim=0 )
a__ = torch.cat([encoding['attention_mask'] for encoding in encodings] ,dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
a__ = tf.stack([encoding['input_ids'] for encoding in encodings] ,axis=0 )
a__ = tf.stack([encoding['attention_mask'] for encoding in encodings] ,axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
a__ = BatchEncoding()
a__ = input_ids
a__ = attention_mask
if query_images is not None:
a__ = BatchEncoding()
a__ = self.image_processor(
__snake_case ,return_tensors=__snake_case ,**__snake_case ).pixel_values
a__ = query_pixel_values
if images is not None:
a__ = self.image_processor(__snake_case ,return_tensors=__snake_case ,**__snake_case )
if text is not None and images is not None:
a__ = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
a__ = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__snake_case ) ,tensor_type=__snake_case )
def lowerCamelCase__( self :Union[str, Any] ,*__snake_case :Union[str, Any] ,**__snake_case :Any ) -> Dict:
return self.image_processor.post_process(*__snake_case ,**__snake_case )
def lowerCamelCase__( self :Optional[int] ,*__snake_case :List[str] ,**__snake_case :List[str] ) -> Dict:
return self.image_processor.post_process_object_detection(*__snake_case ,**__snake_case )
def lowerCamelCase__( self :Optional[int] ,*__snake_case :List[Any] ,**__snake_case :Tuple ) -> Tuple:
return self.image_processor.post_process_image_guided_detection(*__snake_case ,**__snake_case )
def lowerCamelCase__( self :Dict ,*__snake_case :str ,**__snake_case :Optional[Any] ) -> List[Any]:
return self.tokenizer.batch_decode(*__snake_case ,**__snake_case )
def lowerCamelCase__( self :Tuple ,*__snake_case :Union[str, Any] ,**__snake_case :Dict ) -> List[str]:
return self.tokenizer.decode(*__snake_case ,**__snake_case )
@property
def lowerCamelCase__( self :Optional[int] ) -> Optional[Any]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' ,__snake_case ,)
return self.image_processor_class
@property
def lowerCamelCase__( self :List[Any] ) -> Dict:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' ,__snake_case ,)
return self.image_processor
| 335
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ : Union[str, Any] = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[str] = ['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[Any] = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Tuple = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
a_ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 484
|
a_ : List[str] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
a_ : Any = [{'type': 'code', 'content': INSTALL_CONTENT}]
a_ : str = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 484
| 1
|
'''simple docstring'''
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Returns the multiplication table of `number` up to `number_of_terms` terms."""
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
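    # Illustrative check (not in the original file):
    assert multiplication_table(3, 2) == "3 * 1 = 3\n3 * 2 = 6"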
| 72
|
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class a_ :
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=99 , UpperCAmelCase=32 , UpperCAmelCase=5 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=5_12 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=None , ):
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = use_token_type_ids
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = num_choices
a_ = scope
def lowerCAmelCase__ ( self ):
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ = None
if self.use_input_mask:
a_ = random_attention_mask([self.batch_size, self.seq_length] )
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a_ = None
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a_ = ids_tensor([self.batch_size] , self.num_choices )
a_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__ ( self ):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , )
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
a_ = BioGptModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
a_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase )
a_ = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ):
a_ = BioGptForCausalLM(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
a_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase ):
a_ = BioGptModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
# create attention mask
a_ = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCAmelCase )
a_ = self.seq_length // 2
a_ = 0
# first forward pass
a_ , a_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase ).to_tuple()
        # create hypothetical next token and extend to next_input_ids
a_ = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
a_ = ids_tensor((1,) , UpperCAmelCase ).item() + 1
a_ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
a_ = random_other_next_tokens
# append to next input_ids and attn_mask
a_ = torch.cat([input_ids, next_tokens] , dim=-1 )
a_ = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=UpperCAmelCase )] , dim=1 , )
# get two different outputs
a_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase )["""last_hidden_state"""]
a_ = model(UpperCAmelCase , past_key_values=UpperCAmelCase , attention_mask=UpperCAmelCase )["""last_hidden_state"""]
# select random slice
a_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
a_ = output_from_no_past[:, -1, random_slice_idx].detach()
a_ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase ):
a_ = BioGptModel(config=UpperCAmelCase ).to(UpperCAmelCase ).eval()
a_ = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCAmelCase )
# first forward pass
a_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase , use_cache=UpperCAmelCase )
a_ , a_ = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
a_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
a_ = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and attention mask
a_ = torch.cat([input_ids, next_tokens] , dim=-1 )
a_ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
a_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase )["""last_hidden_state"""]
a_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase )[
"""last_hidden_state"""
]
# select random slice
a_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
a_ = output_from_no_past[:, -3:, random_slice_idx].detach()
a_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase , UpperCAmelCase=False ):
a_ = BioGptForCausalLM(UpperCAmelCase )
model.to(UpperCAmelCase )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
a_ = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def lowerCAmelCase__ ( self , UpperCAmelCase , *UpperCAmelCase ):
a_ = BioGptModel(UpperCAmelCase )
a_ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_01 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase ):
a_ = self.num_labels
a_ = BioGptForTokenClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
a_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase__ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class a_ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase__ : Optional[int] = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
lowerCamelCase__ : Dict = (BioGptForCausalLM,) if is_torch_available() else ()
lowerCamelCase__ : int = (
{
'feature-extraction': BioGptModel,
'text-classification': BioGptForSequenceClassification,
'text-generation': BioGptForCausalLM,
'token-classification': BioGptForTokenClassification,
'zero-shot': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : Union[str, Any] = False
def lowerCAmelCase__ ( self ):
a_ = BioGptModelTester(self )
a_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def lowerCAmelCase__ ( self ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a_ = type
self.model_tester.create_and_check_model(*UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*UpperCAmelCase , gradient_checkpointing=UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*UpperCAmelCase )
@slow
def lowerCAmelCase__ ( self ):
a_ = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(UpperCAmelCase )
a_ = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
a_ = """left"""
# Define PAD Token = EOS Token = 50256
a_ = tokenizer.eos_token
a_ = model.config.eos_token_id
# use different length sentences to test batching
a_ = [
"""Hello, my dog is a little""",
"""Today, I""",
]
a_ = tokenizer(UpperCAmelCase , return_tensors="""pt""" , padding=UpperCAmelCase )
a_ = inputs["""input_ids"""].to(UpperCAmelCase )
a_ = model.generate(
input_ids=UpperCAmelCase , attention_mask=inputs["""attention_mask"""].to(UpperCAmelCase ) , )
a_ = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(UpperCAmelCase )
a_ = model.generate(input_ids=UpperCAmelCase )
a_ = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item()
a_ = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(UpperCAmelCase )
a_ = model.generate(input_ids=UpperCAmelCase , max_length=model.config.max_length - num_paddings )
a_ = tokenizer.batch_decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
a_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCAmelCase )
a_ = tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCAmelCase )
a_ = [
"""Hello, my dog is a little bit bigger than a little bit.""",
"""Today, I have a good idea of how to use the information""",
]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , [non_padded_sentence, padded_sentence] )
@slow
def lowerCAmelCase__ ( self ):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = BioGptModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = 3
a_ = input_dict["""input_ids"""]
a_ = input_ids.ne(1 ).to(UpperCAmelCase )
a_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
a_ = BioGptForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
a_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowerCAmelCase__ ( self ):
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = 3
a_ = """multi_label_classification"""
a_ = input_dict["""input_ids"""]
a_ = input_ids.ne(1 ).to(UpperCAmelCase )
a_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
a_ = BioGptForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
a_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class a_ ( unittest.TestCase ):
@slow
def lowerCAmelCase__ ( self ):
a_ = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
a_ = torch.tensor([[2, 48_05, 9, 6_56, 21]] )
a_ = model(UpperCAmelCase )[0]
a_ = 4_23_84
a_ = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , UpperCAmelCase )
a_ = torch.tensor(
[[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 ) )
@slow
def lowerCAmelCase__ ( self ):
a_ = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
a_ = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(UpperCAmelCase )
torch.manual_seed(0 )
a_ = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(UpperCAmelCase )
a_ = model.generate(
**UpperCAmelCase , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=UpperCAmelCase , )
a_ = tokenizer.decode(output_ids[0] , skip_special_tokens=UpperCAmelCase )
a_ = (
"""COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"""
""" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"""
""" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"""
""" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"""
""" more than 800,000 deaths."""
)
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
| 263
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
A = logging.get_logger(__name__)
class __a ( __A ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = ["""pixel_values"""]
def __init__( self , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = PILImageResampling.BILINEAR , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = True , UpperCamelCase__ = 1 / 255 , UpperCamelCase__ = True , UpperCamelCase__ = None , UpperCamelCase__ = None , **UpperCamelCase__ , ):
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size if size is not None else {'shortest_edge': 256}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : str = crop_size if crop_size is not None else {'height': 224, 'width': 224}
SCREAMING_SNAKE_CASE_ : List[Any] = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = do_resize
SCREAMING_SNAKE_CASE_ : Optional[int] = size
SCREAMING_SNAKE_CASE_ : Optional[int] = resample
SCREAMING_SNAKE_CASE_ : Any = do_center_crop
SCREAMING_SNAKE_CASE_ : List[Any] = crop_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_rescale
SCREAMING_SNAKE_CASE_ : int = rescale_factor
SCREAMING_SNAKE_CASE_ : Any = do_normalize
SCREAMING_SNAKE_CASE_ : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = PILImageResampling.BICUBIC , UpperCamelCase__ = None , **UpperCamelCase__ , ):
SCREAMING_SNAKE_CASE_ : Tuple = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
SCREAMING_SNAKE_CASE_ : Optional[int] = get_resize_output_image_size(UpperCamelCase__ , size=size['shortest_edge'] , default_to_square=UpperCamelCase__ )
return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , **UpperCamelCase__ , ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_size_dict(UpperCamelCase__ )
return center_crop(UpperCamelCase__ , size=(size['height'], size['width']) , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , **UpperCamelCase__ ):
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , **UpperCamelCase__ , ):
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = ChannelDimension.FIRST , **UpperCamelCase__ , ):
SCREAMING_SNAKE_CASE_ : str = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_ : List[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE_ : int = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : int = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE_ : List[str] = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE_ : Dict = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_ : Dict = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_ : Tuple = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_ : Any = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_ : int = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_ : Optional[int] = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_ : Optional[int] = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE_ : Dict = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_ : Optional[int] = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE_ : Dict = {'pixel_values': images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
| 704
|
import math


def fx(x: float, a: float) -> float:
    """The function f(x) = x**2 - a, whose positive root is sqrt(a)."""
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    """The derivative f'(x) = 2x."""
    return 2 * x


def get_initial_point(a: float) -> float:
    """Returns a starting point above sqrt(a) by repeated squaring."""
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 1e-14) -> float:
    """Approximates sqrt(a) with the Newton-Raphson method."""
    if a < 0:
        raise ValueError("math domain error")
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
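    # Illustrative check (not in the original file): the iterative result
    # should closely match the exact square root.
    assert abs(square_root_iterative(25.0) - 5.0) < 1e-9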
| 97
| 0
|
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    """Loads the sequence-classification head weights from the S3PRL downstream dict."""
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Loads the audio-frame-classification head weights from the S3PRL downstream dict."""
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Loads the x-vector head weights from the S3PRL downstream dict."""
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Converts an S3PRL UniSpeechSat checkpoint to the transformers design."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
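    # Example invocation (illustrative; the script filename and paths below are
    # placeholders, not from the original file):
    #   python convert_unispeech_sat_s3prl_checkpoint.py \
    #       --base_model_name microsoft/unispeech-sat-base \
    #       --config_path ./config.json \
    #       --checkpoint_path ./downstream_checkpoint.ckpt \
    #       --model_dump_path ./converted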
| 319
|
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@require_torch
def __lowerCamelCase ( self ):
lowercase : Optional[Any] = pipeline(
task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''' )
lowercase : Any = load_dataset('''ashraq/esc50''' )
lowercase : Union[str, Any] = dataset['''train''']['''audio'''][-1]['''array''']
lowercase : Optional[int] = audio_classifier(SCREAMING_SNAKE_CASE__ , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{'''score''': 0.501, '''label''': '''Sound of a dog'''}, {'''score''': 0.499, '''label''': '''Sound of vaccum cleaner'''}] , )
@unittest.skip('''No models are available in TF''' )
def __lowerCamelCase ( self ):
pass
@slow
@require_torch
def __lowerCamelCase ( self ):
lowercase : List[str] = pipeline(
task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
# This is an audio of a dog
lowercase : List[str] = load_dataset('''ashraq/esc50''' )
lowercase : Dict = dataset['''train''']['''audio'''][-1]['''array''']
lowercase : Dict = audio_classifier(SCREAMING_SNAKE_CASE__ , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
] , )
lowercase : Tuple = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [
[
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
lowercase : Dict = audio_classifier(
[audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [
[
{'''score''': 0.999, '''label''': '''Sound of a dog'''},
{'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
@unittest.skip('''No models are available in TF''' )
def __lowerCamelCase ( self ):
pass
| 319
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@property
def snake_case ( self : Dict )-> Any:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case ( self : int )-> Tuple:
lowerCamelCase__ : List[str] =ort.SessionOptions()
lowerCamelCase__ : List[Any] =False
return options
def snake_case ( self : Union[str, Any] )-> Optional[Any]:
lowerCamelCase__ : Dict =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
lowerCamelCase__ : Union[str, Any] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
lowerCamelCase__ : Tuple =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy''' )
# using the PNDM scheduler by default
lowerCamelCase__ : Any =OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
'''CompVis/stable-diffusion-v1-4''', revision='''onnx''', safety_checker=lowercase_, feature_extractor=lowercase_, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=lowercase_ )
lowerCamelCase__ : int ='''A red cat sitting on a park bench'''
lowerCamelCase__ : Any =np.random.RandomState(0 )
lowerCamelCase__ : Dict =pipe(
prompt=lowercase_, image=lowercase_, mask_image=lowercase_, strength=0.75, guidance_scale=7.5, num_inference_steps=15, generator=lowercase_, output_type='''np''', )
lowerCamelCase__ : Union[str, Any] =output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-2
| 716
|
"""simple docstring"""
def solution(n: int = 4000000) -> int:
    """Returns the sum of the even Fibonacci numbers that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(f'{solution() = }')
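    # Illustrative check (not in the original file): 2 + 8 + 34 = 44.
    assert solution(35) == 44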
| 625
| 0
|
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def lowerCamelCase_ (UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : bool = False ):
if radian_mode:
return [magnitude * cos(UpperCamelCase__ ), magnitude * sin(UpperCamelCase__ )]
return [magnitude * cos(radians(UpperCamelCase__ ) ), magnitude * sin(radians(UpperCamelCase__ ) )]
def lowerCamelCase_ (UpperCamelCase__ : NDArray[floataa] , UpperCamelCase__ : NDArray[floataa] , UpperCamelCase__ : float = 10**-1 ):
_UpperCAmelCase : NDArray[floataa] = cross(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase : float = sum(UpperCamelCase__ )
return abs(UpperCamelCase__ ) < eps
if __name__ == "__main__":
# Test to check if it works
    forces = array(
[
polar_force(718.4, 180 - 30),
polar_force(879.54, 45),
polar_force(100, -90),
]
)
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
    forces = array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
    location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 506
|
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Return True if the given number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
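
# Spot checks I added for is_prime: 97 is prime, while 91 = 7 * 13 is not.
assert is_prime(97) and not is_prime(91)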
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    """Return the first n odd composites that cannot be written as a prime plus twice a square."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be > 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    """Return the smallest odd composite that is not a prime plus twice a square."""
    return compute_nums(1)[0]
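
# Context note I added: this is Project Euler problem 46 ("Goldbach's other
# conjecture"); the expected value of solution() is 5777, the smallest odd
# composite that cannot be written as a prime plus twice a square.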
if __name__ == "__main__":
print(f"{solution() = }")
| 506
| 1
|
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Given any two of resistance, reactance and impedance (pass the unknown as 0), solve for the third."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
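
# Worked example I added: a 3-ohm resistance with a 4-ohm reactance forms a
# 3-4-5 right triangle, so the impedance is 5 ohms.
assert electrical_impedance(3, 4, 0) == {"impedance": 5.0}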
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710
|
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class Conversation:
    """Utility class holding a conversation and its history for `ConversationalPipeline`."""

    def __init__(
        self,
        text: str = None,
        conversation_id: uuid.UUID = None,
        past_user_inputs=None,
        generated_responses=None,
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
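
# Usage sketch (added for illustration; mirrors the public API defined above):
#
#     conversation = Conversation("Going to the movies tonight - any suggestions?")
#     conversation.add_user_input("Is it an action movie?")  # warns: previous input unprocessed
#     conversation.mark_processed()
#     conversation.append_response("The Big Lebowski")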
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> List[int]:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
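
# Usage sketch (illustrative only; the checkpoint name is my assumption):
#
#     from transformers import pipeline
#     chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
#     conversation = chatbot(Conversation("Hi, can you recommend a movie?"))
#     print(conversation.generated_responses[-1])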
| 250
| 0
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length", type=int, default=5, help="The maximum total input sequence length after tokenization."
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name"
    )
    parser.add_argument("--device", type=str, default="cpu", help="Device where the model will be run")
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args
def UpperCamelCase ( snake_case__ , snake_case__="cpu"):
lowerCAmelCase_ : List[Any] = model_dict[model_name].from_pretrained(__lowerCAmelCase).to(__lowerCAmelCase)
lowerCAmelCase_ : Any = tokenizer_dict[model_name].from_pretrained(__lowerCAmelCase)
if model_name in ["facebook/bart-base"]:
lowerCAmelCase_ : str = 0
lowerCAmelCase_ : str = None
lowerCAmelCase_ : Optional[int] = 0
return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
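
# Example invocation (illustrative; the script file name is my assumption,
# the flags match the argparse definitions above):
#
#     python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#         --num_beams 4 --max_length 5 --output_file_path bart.onnx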
if __name__ == "__main__":
main()
| 659
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """Linked-list based stack: the top of the stack is the head of the list."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
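
# Example session (added for illustration):
#
#     stack = LinkedStack[int]()
#     stack.push(1); stack.push(2); stack.push(3)
#     str(stack)    # '3->2->1' -- the most recently pushed item comes first
#     stack.pop()   # 3
#     stack.peek()  # 2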
if __name__ == "__main__":
from doctest import testmod
testmod()
| 276
| 0
|
'''simple docstring'''
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length from all printable character classes."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    """Generate a password that is guaranteed to contain the characters in chars_incl."""
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    """Return i characters drawn at random from chars_incl."""
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    """Check that the password is long enough and mixes upper, lower, digit and special characters."""
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase, numbers, and special characters


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, you had better save it.]")
if __name__ == "__main__":
main()
| 711
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 687
| 0
|
'''simple docstring'''
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Implement the tanh activation function: (2 / (1 + e^(-2x))) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
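
# Quick check I added: tanh(0) is exactly 0, and the output saturates toward
# +/-1 for large-magnitude inputs.
assert float(tangent_hyperbolic(np.array(0.0))) == 0.0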
if __name__ == "__main__":
import doctest
doctest.testmod()
| 414
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )

            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)

        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()

        output["model_type"] = self.__class__.model_type
        return output
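
# Usage sketch I added: round-tripping the config through to_dict.
#
#     config = DPTConfig(readout_type="project")
#     d = config.to_dict()
#     assert d["model_type"] == "dpt" and d["readout_type"] == "project"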
| 414
| 1
|
def is_palindrome(head) -> bool:
    """Check a singly linked list by reversing its second half in place."""
    if not head:
        return True
    # split the list into two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forgetting still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head) -> bool:
    """Check a singly linked list by pushing its second half onto a stack."""
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    fast = slow = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head) -> bool:
    """Check a singly linked list by recording the positions of each value."""
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
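
# Illustration I added (assumes a minimal node type with `val` and `next`):
#
#     class ListNode:
#         def __init__(self, val):
#             self.val, self.next = val, None
#
#     a, b, c = ListNode(1), ListNode(2), ListNode(1)
#     a.next, b.next = b, c
#     is_palindrome_dict(a)  # True -- 1 -> 2 -> 1 reads the same both ways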
| 714
|
def apply_table(inp, table):
    """Apply a permutation table to the input bit string (tables are 1-indexed)."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Rotate the bit string one position to the left."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up a 2-bit S-box output: the outer bits select the row, the inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of Simplified DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
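
    # Sanity note I added: because S-DES is a two-round Feistel cipher, running
    # the rounds with (key1, key2) and then with (key2, key1), as done above,
    # must reproduce the original 8-bit message for any valid key/message pair.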
| 224
| 0
|
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version('''pytorch_lightning>=1.0.4''')
MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,              # not supported for now
    # '': get_constant_schedule_with_warmup,  # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading

        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare the optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check these named parameters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()

        return [optimizer], [scheduler]
    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for the lr scheduler."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path", default=None, type=str, required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name", default=None, type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir", default=str(Path(root_dir).parent / "test_run" / "cache"), type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop", type=float, help="Encoder layer dropout probability (Optional). Goes into model.config"
        )
        parser.add_argument(
            "--decoder_layerdrop", type=float, help="Decoder layer dropout probability (Optional). Goes into model.config"
        )
        parser.add_argument("--dropout", type=float, help="Dropout probability (Optional). Goes into model.config")
        parser.add_argument(
            "--attention_dropout", type=float, help="Attention dropout probability (Optional). Goes into model.config"
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler", default="linear", choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar, type=str, help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    def on_after_backward(self, trainer, pl_module):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    # To allow all pl args uncomment the following line
    # parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        "--output_dir", default=str(Path(root_dir).parent / "test_run" / "model_checkpoints"), type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16", action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level", type=str, default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps", dest="accumulate_grad_batches", type=int, default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir", default=str(Path(root_dir).parent / "test_run" / "dummy-train-data"), type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)

    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
| 192
|
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, attention_probs_dropout_prob=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, q_groups=self.q_groups, k_groups=self.k_groups, v_groups=self.v_groups, post_attention_groups=self.post_attention_groups, intermediate_groups=self.intermediate_groups, output_groups=self.output_groups,
        )
    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
| 192
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )

            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)

        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()

        output["model_type"] = self.__class__.model_type
        return output
| 129
|
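The hybrid branch of the config above normalizes `backbone_config` so downstream code always receives a config object. The same normalization in isolation, assuming `BitConfig` and `PretrainedConfig` import from transformers as in the snippet:

from transformers import BitConfig, PretrainedConfig


def resolve_backbone_config(backbone_config=None):
    # Accept None, a plain dict, or a ready-made config object,
    # and always hand back a PretrainedConfig instance.
    if backbone_config is None:
        backbone_config = {"layer_type": "bottleneck", "depths": [3, 4, 9]}
    if isinstance(backbone_config, dict):
        return BitConfig(**backbone_config)
    if isinstance(backbone_config, PretrainedConfig):
        return backbone_config
    raise ValueError(f"Expected dict or PretrainedConfig, got {type(backbone_config)}")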
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    # the second LSTM infers its input shape from the previous layer
    model.add(LSTM(64))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
| 129
| 1
|
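The two loops in the script above build supervision pairs with a sliding window; the shape bookkeeping is easier to verify in isolation. A small sketch (names hypothetical):

import numpy as np


def make_windows(series: np.ndarray, look_back: int, forward_days: int):
    # X holds `look_back` past steps, y holds the next `forward_days` steps.
    xs, ys = [], []
    for i in range(len(series) - look_back - forward_days + 1):
        xs.append(series[i : i + look_back])
        ys.append(series[i + look_back : i + look_back + forward_days])
    return np.array(xs), np.array(ys)


X, y = make_windows(np.arange(10, dtype=float), look_back=3, forward_days=2)
print(X.shape, y.shape)  # (6, 3) (6, 2)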
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 215
|
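The `sys.modules` swap at the bottom of the snippet above is what makes the package lazy: submodules load only on first attribute access. A toy version of the idea (simplified, not the transformers implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    """Toy lazy loader: attributes resolve to real imports on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the module that defines it.
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(self._name_to_module[attr])
        return getattr(module, attr)


lazy = LazyModule("toy", {"json": ["dumps"]})
print(lazy.dumps({"a": 1}))  # json is only imported here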
'''simple docstring'''
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'pipelines_utils',
'0.22.0',
'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
standard_warn=False,
stacklevel=3,
)
| 215
| 1
|
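`deprecate` above is essentially a structured wrapper over `warnings.warn`. A minimal stand-in (this signature is illustrative, not the diffusers API):

import warnings


def deprecate(name: str, remove_in: str, message: str, stacklevel: int = 2):
    # Emit a FutureWarning pointing at the caller's import site.
    warnings.warn(
        f"`{name}` is deprecated and will be removed in {remove_in}. {message}",
        FutureWarning,
        stacklevel=stacklevel,
    )


deprecate("pipelines_utils", "0.22.0", "Import from the new location instead.")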
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__( self , parent , ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = EsmConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder( self ):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFEsmModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs , encoder_hidden_states=encoder_hidden_states )
        # Also check the case where encoder outputs are not passed
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFEsmForMaskedLM(config=config )
        result = model([input_ids, input_mask] )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ : Dict = (
{
"feature-extraction": TFEsmModel,
"fill-mask": TFEsmForMaskedLM,
"text-classification": TFEsmForSequenceClassification,
"token-classification": TFEsmForTokenClassification,
"zero-shot": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ : Dict = False
lowerCAmelCase__ : List[Any] = False
    def setUp( self ):
        self.model_tester = TFEsmModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_as_decoder( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip("Protein models do not support embedding resizing." )
    def test_resize_token_embeddings( self ):
        pass
@unittest.skip("Protein models do not support embedding resizing." )
    def test_save_load_after_resize_token_embeddings( self ):
        pass
    def test_model_common_attributes( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name , dict )
                for k, v in name.items():
                    assert isinstance(v , tf.Variable )
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class UpperCamelCase__ ( unittest.TestCase ):
@slow
    def test_inference_masked_lm( self ):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape ) , expected_shape )
# compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
[8.921518, -10.589814, -6.4671307],
[-6.3967156, -13.911377, -1.1211915],
[-7.781247, -13.951557, -3.740592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
    def test_inference_no_head( self ):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        output = model(input_ids )[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
[0.14443092, 0.54125327, 0.3247739],
[0.30340484, 0.00526676, 0.31077722],
[0.32278043, -0.24987096, 0.3414628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 289
|
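The ESM tests above follow the ModelTester pattern: a plain helper object owns the shared hyperparameters and exposes prepare_* and create_and_check_* hooks that each test_* method drives. A framework-free skeleton of that structure (all names hypothetical):

import unittest


class ToyModelTester:
    # One object owns the shared hyperparameters and the check logic,
    # so every test method stays a two-liner.
    def __init__(self, parent, hidden_size=32):
        self.parent = parent
        self.hidden_size = hidden_size

    def prepare_config_and_inputs(self):
        config = {"hidden_size": self.hidden_size}
        inputs = [0.0] * self.hidden_size
        return config, inputs

    def create_and_check_model(self, config, inputs):
        self.parent.assertEqual(len(inputs), config["hidden_size"])


class ToyModelTest(unittest.TestCase):
    def test_model(self):
        tester = ToyModelTester(self)
        tester.create_and_check_model(*tester.prepare_config_and_inputs())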
'''simple docstring'''
import re
def is_sri_lankan_phone_number(phone: str ) -> bool:
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
    return bool(re.search(pattern , phone ) )
if __name__ == "__main__":
    phone = "0094702343221"
print(is_sri_lankan_phone_number(phone))
| 289
| 1
|
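The single-line pattern above is easier to audit when expanded with `re.VERBOSE`. An equivalent, commented version with two quick checks:

import re

SRI_LANKA_PHONE = re.compile(
    r"""
    ^(?:0|94|\+94|0{2}94)   # prefix: 0, 94, +94 or 0094
    7(0|1|2|4|5|6|7|8)      # mobile operator digit after the leading 7
    (-|\ |)                 # optional separator: dash, space or nothing
    \d{7}$                  # seven-digit subscriber number
    """,
    re.VERBOSE,
)

assert SRI_LANKA_PHONE.search("+94702343221")
assert not SRI_LANKA_PHONE.search("0094702343")  # too short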
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
snake_case_ = logging.get_logger(__name__)
class a__ ( _lowercase ):
def __init__(self : Any, *__UpperCAmelCase : Dict, **__UpperCAmelCase : Optional[int] ) -> None:
"""simple docstring"""
warnings.warn(
'''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use LayoutLMv2ImageProcessor instead.''', __UpperCAmelCase, )
super().__init__(*__UpperCAmelCase, **__UpperCAmelCase )
| 507
|
'''simple docstring'''
import random
from typing import Any
def fisher_yates_shuffle(data: list ) -> Any:
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
print("""Fisher-Yates Shuffle:""")
print("""List""", integers, strings)
print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 507
| 1
|
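Note that the shuffle above performs `len(data)` unrestricted random transpositions, which is not the textbook Fisher-Yates and does not yield exactly uniform permutations. The canonical, unbiased version walks from the end of the list; a sketch:

import random


def fisher_yates(data: list) -> list:
    # Swap each position with a uniformly chosen index at or below it,
    # walking from the end; every permutation is equally likely.
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data


print(fisher_yates(list(range(8))))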
"""simple docstring"""
snake_case = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
snake_case = [{'type': 'code', 'content': INSTALL_CONTENT}]
snake_case = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 719
|
"""simple docstring"""
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
snake_case = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
snake_case = 'main'
# Default branch name
snake_case = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
snake_case = 'aaaaaaa'
# This commit does not exist, so we should 404.
snake_case = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
snake_case = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def context_en():
print('Welcome!' )
yield
print('Bye!' )
@contextlib.contextmanager
def context_fr():
print('Bonjour!' )
yield
print('Au revoir!' )
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
    def test_module_spec( self ):
"""simple docstring"""
assert transformers.__spec__ is not None
assert importlib.util.find_spec('transformers' ) is not None
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
    def test_context_managers_no_context( self , mock_stdout ):
"""simple docstring"""
with ContextManagers([] ):
print('Transformers are awesome!' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , 'Transformers are awesome!\n' )
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
    def test_context_managers_one_context( self , mock_stdout ):
"""simple docstring"""
with ContextManagers([context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , 'Welcome!\nTransformers are awesome!\nBye!\n' )
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
    def test_context_managers_two_context( self , mock_stdout ):
"""simple docstring"""
with ContextManagers([context_fr(), context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n' )
@require_torch
    def test_find_labels_pt( self ):
        self.assertEqual(find_labels(BertForSequenceClassification ) , ['labels'] )
        self.assertEqual(find_labels(BertForPreTraining ) , ['labels', 'next_sentence_label'] )
        self.assertEqual(find_labels(BertForQuestionAnswering ) , ['start_positions', 'end_positions'] )

        class DummyModel(BertForSequenceClassification ):
            pass

        self.assertEqual(find_labels(DummyModel ) , ['labels'] )
@require_tf
    def test_find_labels_tf( self ):
        self.assertEqual(find_labels(TFBertForSequenceClassification ) , ['labels'] )
        self.assertEqual(find_labels(TFBertForPreTraining ) , ['labels', 'next_sentence_label'] )
        self.assertEqual(find_labels(TFBertForQuestionAnswering ) , ['start_positions', 'end_positions'] )

        class DummyModel(TFBertForSequenceClassification ):
            pass

        self.assertEqual(find_labels(DummyModel ) , ['labels'] )
@require_flax
    def test_find_labels_flax( self ):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification ) , [] )
        self.assertEqual(find_labels(FlaxBertForPreTraining ) , [] )
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering ) , [] )

        class DummyModel(FlaxBertForSequenceClassification ):
            pass

        self.assertEqual(find_labels(DummyModel ) , [] )
| 406
| 0
|
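`ContextManagers([...])` in the tests above behaves like entering the given context managers in order and unwinding them in reverse, which is what `contextlib.ExitStack` provides. A self-contained illustration:

from contextlib import ExitStack, contextmanager


@contextmanager
def banner(text):
    print(f"-- enter {text} --")
    yield
    print(f"-- exit {text} --")


# Managers are entered in list order and exited in reverse, which is the
# behavior a ContextManagers-style helper builds on.
with ExitStack() as stack:
    for cm in [banner("outer"), banner("inner")]:
        stack.enter_context(cm)
    print("body")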
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowercase__ = False
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load( self ):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion' )
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0 )
        image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = generator.manual_seed(0 )
        new_image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images
        assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text_to_image( self ):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            'shi-labs/versatile-diffusion' , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0 )
        image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 630
|
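The first test above encodes a reusable recipe: fixed seed, run, round-trip through save/load, re-seed, run again, compare. The recipe as a generic helper, with hypothetical `pipe_factory` and `run` callables:

import tempfile

import torch


def save_reload_outputs_match(pipe_factory, run, atol=1e-5):
    # `pipe_factory` builds an object exposing save_pretrained / from_pretrained
    # (the diffusers convention); `run` maps (pipe, generator) to a tensor.
    # Both are hypothetical hooks for this sketch.
    pipe = pipe_factory()
    first = run(pipe, torch.manual_seed(0))
    with tempfile.TemporaryDirectory() as tmpdir:
        pipe.save_pretrained(tmpdir)
        reloaded = type(pipe).from_pretrained(tmpdir)
    second = run(reloaded, torch.manual_seed(0))
    return bool((first - second).abs().sum() < atol)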
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase__ ( lowercase, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = RobertaTokenizer
lowerCamelCase__ = RobertaTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = {"""cls_token""": """<s>"""}
    def setUp( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text )  # , add_prefix_space=True)
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def roberta_dict_integration_testing( self ):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=False ) , [0, 31414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=False ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
    def test_sequence_builders( self ):
        tokenizer = self.tokenizer_class.from_pretrained('roberta-base' )
        text = tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_2 = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
        encoded_text_from_decode = tokenizer.encode(
            'sequence builders' , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            'sequence builders' , 'multi-sequence build' , add_special_tokens=True , add_prefix_space=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def A_ ( self ):
_lowerCamelCase : Tuple = self.get_tokenizer()
_lowerCamelCase : str = 'Encode this sequence.'
_lowerCamelCase : Optional[int] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
_lowerCamelCase : Optional[int] = tokenizer.encode(lowercase , add_special_tokens=lowercase , add_prefix_space=lowercase )
_lowerCamelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowercase , lowercase )
_lowerCamelCase : List[str] = tokenizer.encode(lowercase , add_special_tokens=lowercase , add_prefix_space=lowercase )
_lowerCamelCase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowercase , lowercase )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
_lowerCamelCase : Optional[Any] = tokenizer.encode(lowercase , add_special_tokens=lowercase )
_lowerCamelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowercase , lowercase )
# Testing spaces after special tokens
_lowerCamelCase : List[Any] = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase )} ) # mask token has a left space
_lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(lowercase )
_lowerCamelCase : Optional[int] = 'Encode <mask> sequence'
_lowerCamelCase : int = 'Encode <mask>sequence'
_lowerCamelCase : str = tokenizer.encode(lowercase )
_lowerCamelCase : Tuple = encoded.index(lowercase )
_lowerCamelCase : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowercase , lowercase )
_lowerCamelCase : Any = tokenizer.encode(lowercase )
_lowerCamelCase : Union[str, Any] = encoded.index(lowercase )
_lowerCamelCase : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowercase , lowercase )
def A_ ( self ):
pass
def A_ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowerCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
_lowerCamelCase : Dict = self.tokenizer_class.from_pretrained(lowercase , **lowercase )
_lowerCamelCase : List[str] = 'A, <mask> AllenNLP sentence.'
_lowerCamelCase : List[Any] = tokenizer_r.encode_plus(lowercase , add_special_tokens=lowercase , return_token_type_ids=lowercase )
_lowerCamelCase : Union[str, Any] = tokenizer_p.encode_plus(lowercase , add_special_tokens=lowercase , return_token_type_ids=lowercase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
_lowerCamelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
_lowerCamelCase : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
lowercase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
lowercase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def A_ ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
_lowerCamelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
_lowerCamelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
_lowerCamelCase : Dict = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , lowercase )
self.assertEqual(post_processor_state['add_prefix_space'] , lowercase )
self.assertEqual(post_processor_state['trim_offsets'] , lowercase )
def A_ ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowerCamelCase : Dict = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
_lowerCamelCase : Tuple = F'''{text_of_1_token} {text_of_1_token}'''
_lowerCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(
lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
_lowerCamelCase : List[Any] = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase ) + 1, len(lowercase ) + 1 + len(lowercase )) , )
_lowerCamelCase : int = self.rust_tokenizer_class.from_pretrained(
lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
_lowerCamelCase : Union[str, Any] = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase ) + 1, len(lowercase ) + 1 + len(lowercase )) , )
_lowerCamelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
_lowerCamelCase : List[str] = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase ), len(lowercase ) + 1 + len(lowercase )) , )
_lowerCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(
lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
_lowerCamelCase : int = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase ), len(lowercase ) + 1 + len(lowercase )) , )
_lowerCamelCase : int = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
_lowerCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
_lowerCamelCase : Optional[int] = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase ) + 1, 1 + len(lowercase ) + 1 + len(lowercase )) , )
_lowerCamelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
_lowerCamelCase : Union[str, Any] = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase ), 1 + len(lowercase ) + 1 + len(lowercase )) , )
_lowerCamelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
_lowerCamelCase : Optional[Any] = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase ), 1 + len(lowercase ) + 1 + len(lowercase )) , )
| 630
| 1
|
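The offset-mapping assertions at the end of the tests above probe how `add_prefix_space` and `trim_offsets` shift token spans. A hedged sketch of the same probe, assuming network access and the `roberta-base` fast tokenizer:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("roberta-base", add_prefix_space=True, use_fast=True)
enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
# Each offset pair is (start, end) into the raw string; with the default
# trim_offsets=True the leading space is excluded from the second span.
print(enc["offset_mapping"])  # e.g. [(0, 5), (6, 11)]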
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__A =logging.get_logger(__name__)
class DeformableDetrFeatureExtractor( DeformableDetrImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 313
|
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name ):
    if "cls_token" in name:
        name = name.replace("cls_token" , "vit.embeddings.cls_token" )
    if "mask_token" in name:
        name = name.replace("mask_token" , "decoder.mask_token" )
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" )
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed" , "vit.embeddings.position_embeddings" )
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "vit.embeddings.patch_embeddings.projection" )
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm" , "vit.embeddings.norm" )
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks" , "decoder.decoder_layers" )
    if "blocks" in name:
        name = name.replace("blocks" , "vit.encoder.layer" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name:
        name = name.replace("attn" , "attention.self" )
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    if "decoder_embed" in name:
        name = name.replace("decoder_embed" , "decoder.decoder_embed" )
    if "decoder_norm" in name:
        name = name.replace("decoder_norm" , "decoder.decoder_norm" )
    if "decoder_pred" in name:
        name = name.replace("decoder_pred" , "decoder.decoder_pred" )
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight" , "vit.layernorm.weight" )
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias" , "vit.layernorm.bias" )
    return name
def convert_state_dict(orig_state_dict , config ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split("." )
            layer_num = int(key_split[1] )
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[F'{prefix}{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                    orig_state_dict[F'{prefix}{layer_num}.attention.attention.key.weight'] = val[dim : dim * 2, :]
                    orig_state_dict[F'{prefix}{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[F'{prefix}{layer_num}.attention.attention.query.bias'] = val[:dim]
                    orig_state_dict[F'{prefix}{layer_num}.attention.attention.key.bias'] = val[dim : dim * 2]
                    orig_state_dict[F'{prefix}{layer_num}.attention.attention.value.bias'] = val[-dim:]
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
                if "weight" in key:
                    orig_state_dict[F'{prefix}{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                    orig_state_dict[F'{prefix}{layer_num}.attention.attention.key.weight'] = val[dim : dim * 2, :]
                    orig_state_dict[F'{prefix}{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[F'{prefix}{layer_num}.attention.attention.query.bias'] = val[:dim]
                    orig_state_dict[F'{prefix}{layer_num}.attention.attention.key.bias'] = val[dim : dim * 2]
                    orig_state_dict[F'{prefix}{layer_num}.attention.attention.value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url , pytorch_dump_folder_path ):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
    model = ViTMAEForPreTraining(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["model"]
    image_processor = ViTMAEImageProcessor(size=config.image_size )
    new_state_dict = convert_state_dict(state_dict , config )
    model.load_state_dict(new_state_dict )
    model.eval()
    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    image_processor = ViTMAEImageProcessor(size=config.image_size )
    inputs = image_processor(images=image , return_tensors="pt" )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    logits = outputs.logits
    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]] )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]] )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]] )
    # verify logits
    assert torch.allclose(logits[0, :3, :3] , expected_slice , atol=1e-4 )
    print(F'Saving model to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__A =parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 313
| 1
|
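Stripped of the model specifics, the conversion above is an ordered substring rewrite over checkpoint keys plus a q/k/v split. The rewrite half in isolation (the rules below are a toy subset):

from collections import OrderedDict


def rename_state_dict(state_dict, rules):
    # Apply ordered substring-rewrite rules to every checkpoint key,
    # the same shape of logic the conversion script above uses.
    out = OrderedDict()
    for key, value in state_dict.items():
        new_key = key
        for old, new in rules:
            new_key = new_key.replace(old, new)
        out[new_key] = value
    return out


rules = [("blocks", "encoder.layer"), ("attn.proj", "attention.output.dense")]
print(list(rename_state_dict({"blocks.0.attn.proj.weight": None}, rules)))
# ['encoder.layer.0.attention.output.dense.weight']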
'''simple docstring'''
from math import ceil, sqrt
def UpperCAmelCase_ ( lowerCAmelCase_ = 100_0000 ):
"""simple docstring"""
    answer = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f"{solution() = }")
| 310
|
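The closed-form loop above can be cross-checked on small limits by enumerating laminae directly: an outer square of width `o` with a centered hole of width `h` (same parity, `1 <= h <= o - 2`) uses `o**2 - h**2` tiles. A brute-force sketch:

def laminae_brute_force(limit: int) -> int:
    # Count (outer, hole) pairs of the same parity whose tile count
    # outer**2 - hole**2 stays within `limit`; slow but obviously right.
    count = 0
    outer = 3
    while outer * outer - (outer - 2) ** 2 <= limit:
        hole = outer - 2
        while hole >= 1 and outer * outer - hole * hole <= limit:
            count += 1
            hole -= 2
        outer += 1
    return count


print(laminae_brute_force(100))  # 41, the count quoted in Project Euler 173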
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
__lowerCamelCase : Optional[Any] = 256
class UpperCAmelCase ( _lowercase ):
UpperCAmelCase : Union[str, Any] = ['''melgan''']
    def __init__(self , notes_encoder: SpectrogramNotesEncoder , continuous_encoder: SpectrogramContEncoder , decoder: TaFilmDecoder , scheduler: DDPMScheduler , melgan: OnnxRuntimeModel if is_onnx_available() else Any , ) -> None:
        super().__init__()
        # From MELGAN
        self.min_value = math.log(1e-5 )  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128
        self.register_modules(
            notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
    def scale_features(self , features , output_range=(-1.0, 1.0) , clip=False ):
        min_out , max_out = output_range
        if clip:
            features = torch.clip(features , self.min_value , self.max_value )
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features(self , outputs , input_range=(-1.0, 1.0) , clip=False ):
        min_out , max_out = input_range
        outputs = torch.clip(outputs , min_out , max_out ) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode(self , input_tokens , continuous_inputs , continuous_mask ):
        tokens_mask = input_tokens > 0
        tokens_encoded , tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens , encoder_inputs_mask=tokens_mask )
        continuous_encoded , continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs , encoder_inputs_mask=continuous_mask )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode(self , encodings_and_masks , input_tokens , noise_time ):
        timesteps = noise_time
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(input_tokens.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks , decoder_input_tokens=input_tokens , decoder_noise_time=timesteps )
        return logits
@torch.no_grad()
    def __call__(self , input_tokens: List[List[int]] , generator: Optional[torch.Generator] = None , num_inference_steps: int = 100 , return_dict: bool = True , output_type: str = "numpy" , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A__ , A__ ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(A__ )}.' )
        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.float32 )
        full_pred_mel = np.zeros([1, 0, self.n_dims] , np.float32 )
        ones = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=bool , device=self.device )
for i, encoder_input_tokens in enumerate(A__ ):
if i == 0:
lowercase = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
lowercase = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=A__ , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
lowercase = ones
lowercase = self.scale_features(
A__ , output_range=[-1.0, 1.0] , clip=A__ )
lowercase = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=A__ , continuous_mask=A__ , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
lowercase = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=A__ , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(A__ )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase = self.decode(
encodings_and_masks=A__ , input_tokens=A__ , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
lowercase = self.scheduler.step(A__ , A__ , A__ , generator=A__ ).prev_sample
lowercase = self.scale_to_features(A__ , input_range=[-1.0, 1.0] )
lowercase = mel[:1]
lowercase = mel.cpu().float().numpy()
lowercase = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A__ , A__ )
logger.info("Generated segment" , A__ )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." )
        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32 ) )
        else:
            output = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=output )
| 310
| 1
|
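`scale_features` and `scale_to_features` in the pipeline above are inverse affine maps between the MelGAN log-amplitude range and [-1, 1]. The arithmetic in isolation:

import torch


def scale(x, in_range, out_range, clip=False):
    # Affine map from in_range to out_range; swapping the two ranges
    # gives the exact inverse, as in scale_to_features above.
    lo_in, hi_in = in_range
    lo_out, hi_out = out_range
    if clip:
        x = torch.clip(x, lo_in, hi_in)
    zero_one = (x - lo_in) / (hi_in - lo_in)
    return zero_one * (hi_out - lo_out) + lo_out


x = torch.tensor([-11.5129, 4.0])  # roughly [log(1e-5), 4.0]
y = scale(x, (-11.5129, 4.0), (-1.0, 1.0))
print(y)                                       # tensor([-1., 1.])
print(scale(y, (-1.0, 1.0), (-11.5129, 4.0)))  # round-trips back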
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder ):
def SCREAMING_SNAKE_CASE ( self : Any ) -> str:
return datasets.DatasetInfo(
features=datasets.Features({"""content""": datasets.Value("""string""" )} ) , supervised_keys=__UpperCAmelCase , )
def SCREAMING_SNAKE_CASE ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : List[Any] ) -> int:
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_dummy_examples()} )]
def SCREAMING_SNAKE_CASE ( self : str , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any] ) -> Tuple:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(__UpperCAmelCase )
class NestedBeamDataset(datasets.BeamBasedBuilder ):
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
return datasets.DatasetInfo(
features=datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) , supervised_keys=__UpperCAmelCase , )
def SCREAMING_SNAKE_CASE ( self : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] ) -> Optional[int]:
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_nested_examples()} )
]
def SCREAMING_SNAKE_CASE ( self : int , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[int] ) -> List[str]:
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(__UpperCAmelCase )
def get_test_dummy_examples():
'''simple docstring'''
return [(i, {"content": content}) for i, content in enumerate(["""foo""", """bar""", """foobar"""] )]
def get_test_nested_examples():
'''simple docstring'''
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["""foo""", """bar""", """foobar"""] )]
class BeamBuilderTest(TestCase ):
@require_beam
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
SCREAMING_SNAKE_CASE__ = DummyBeamDataset(cache_dir=__UpperCAmelCase , beam_runner="""DirectRunner""" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(__UpperCAmelCase , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
SCREAMING_SNAKE_CASE__ = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , __UpperCAmelCase )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , __UpperCAmelCase )
self.assertDictEqual(dset["""train"""][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["""train"""][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(__UpperCAmelCase , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
@require_beam
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
import apache_beam as beam
SCREAMING_SNAKE_CASE__ = beam.io.parquetio.WriteToParquet
SCREAMING_SNAKE_CASE__ = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
SCREAMING_SNAKE_CASE__ = DummyBeamDataset(cache_dir=__UpperCAmelCase , beam_runner="""DirectRunner""" )
with patch("""apache_beam.io.parquetio.WriteToParquet""" ) as write_parquet_mock:
SCREAMING_SNAKE_CASE__ = partial(__UpperCAmelCase , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
__UpperCAmelCase , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
__UpperCAmelCase , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
SCREAMING_SNAKE_CASE__ = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , __UpperCAmelCase )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , __UpperCAmelCase )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["""train"""]["""content"""] ) , sorted(["""foo""", """bar""", """foobar"""] ) )
self.assertTrue(
os.path.exists(os.path.join(__UpperCAmelCase , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
@require_beam
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmp_cache_dir:
SCREAMING_SNAKE_CASE__ = DummyBeamDataset(cache_dir=__UpperCAmelCase )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
SCREAMING_SNAKE_CASE__ = NestedBeamDataset(cache_dir=__UpperCAmelCase , beam_runner="""DirectRunner""" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(__UpperCAmelCase , builder.name , """default""" , """0.0.0""" , F"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) )
SCREAMING_SNAKE_CASE__ = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , __UpperCAmelCase )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , __UpperCAmelCase )
self.assertDictEqual(dset["""train"""][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["""train"""][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(__UpperCAmelCase , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
| 616
|
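The sharding test above keeps the real parquet writer but forces `num_shards=2` by patching in a `functools.partial`. The same trick in miniature (names hypothetical):

from functools import partial
from unittest.mock import patch


def write_to_parquet(path, num_shards=1):
    return f"wrote {path} in {num_shards} shard(s)"


def job():
    # Code under test looks the writer up through the module namespace,
    # so a patched attribute is picked up at call time.
    return write_to_parquet("out.parquet")


with patch(f"{__name__}.write_to_parquet", partial(write_to_parquet, num_shards=2)):
    print(job())  # wrote out.parquet in 2 shard(s)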
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
    def __init__(self, parent, batch_size: int = 13, image_size: int = 64, patch_size: int = 2, embed_dim: int = 3, num_channels: int = 3, is_training: bool = True, use_labels: bool = True, hidden_size: int = 128, hidden_sizes=[16, 32, 64, 128], num_hidden_layers: int = 7, num_attention_heads: int = 4, intermediate_size: int = 37, hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1, type_sequence_label_size: int = 10, initializer_range: float = 0.02, encoder_stride: int = 2, num_attention_outputs: int = 1, dim: int = 128, depths: List[int] = [2, 2, 2, 2], resolution: int = 2, mlp_expansion_ratio: int = 2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return EfficientFormerConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, resolution=self.resolution, depths=self.depths, hidden_sizes=self.hidden_sizes, dim=self.dim, mlp_expansion_ratio=self.mlp_expansion_ratio)
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="EfficientFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="EfficientFormer does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1)
            self.assertEqual(len(hidden_states), expected_num_layers)
            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]), [seq_length, self.model_tester.hidden_size])
            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states
                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size])
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet")
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester, "seq_length", None)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)
        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)
            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length])
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length])
    def test_compile_tf_model(self):
        # We use a simplified version of this test for EfficientFormer because it requires training=False
        # and Keras refuses to let us force that during functional construction
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)
            self.assertTrue(outputs_dict is not None)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 616
| 1
|
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, input_dims: int = 128, targets_length: int = 256, max_decoder_noise_time: float = 2000.0, d_model: int = 768, num_layers: int = 12, num_heads: int = 12, d_kv: int = 64, d_ff: int = 2048, dropout_rate: float = 0.1):
        super().__init__()
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(), nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU())
        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)
        self.decoder_norm = T5LayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)
    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)
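    # Shape sketch for encoder_decoder_mask above (illustrative comment, not executed):
    #   query_input (batch, query_len) -> unsqueeze(-1) -> (batch, query_len, 1)
    #   key_input   (batch, key_len)   -> unsqueeze(-2) -> (batch, 1, key_len)
    #   broadcast multiply             -> (batch, query_len, key_len)
    #   unsqueeze(-3) adds a head axis -> (batch, 1, query_len, key_len)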
    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time, embedding_dim=self.config.d_model, max_period=self.config.max_decoder_noise_time).to(dtype=self.dtype)
        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length))
        position_encodings = self.position_encoding(decoder_positions)
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype)
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)
        for lyr in self.decoders:
            y = lyr(y, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded, encoder_attention_mask=encoder_decoder_mask)[0]
        y = self.decoder_norm(y)
        y = self.post_dropout(y)
        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate))
        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon))
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon))
    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None):
        hidden_states = self.layer[0](
            hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask)
        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype)
            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask)
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)
        return (hidden_states,)
class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)
    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
        # Self-attention block
        attention_output = self.attention(normed_hidden_states)
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states
class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)
    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1))
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)
    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()
    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
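# The feed-forward above is the gated-GELU variant used by T5 v1.1: wi_0 is passed
# through GELU and gates a plain linear projection wi_1 of the same input, i.e.
# FFN(x) = wo(dropout(gelu(wi_0(x)) * wi_1(x))).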
class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        # T5-style layer norm: scale only, no bias and no mean subtraction.
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps
    def forward(self, hidden_states):
        # Accumulate the variance in fp32 so half-precision inputs stay numerically stable.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class T5FiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)
    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
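# FiLM sketch: scale_bias projects the conditioning embedding to 2 * out_features,
# chunked into (scale, shift); the input is modulated as x * (1 + scale) + shift,
# so an all-zero conditioning signal leaves x unchanged.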
| 94
|
from ..utils import DummyObject, requires_backends
# NOTE: the concrete class names in this dummy-objects module were lost in
# extraction; the four names below are an assumption, reconstructed from the
# diffusers dummy_flax_and_transformers_objects template.
class FlaxStableDiffusionControlNetPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
class FlaxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
class FlaxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
class FlaxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax", "transformers"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
| 187
| 0
|
def solution(n: int = 1000) -> int:
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
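# Derivation behind the loop above: with a + b + c = n and a**2 + b**2 = c**2,
# substituting c = n - a - b gives 0 = n**2 - 2*a*n - 2*b*n + 2*a*b, hence
# b = (n**2 - 2*a*n) / (2*n - 2*a). Integer division may round, which is why each
# candidate is re-checked with c * c == a * a + b * b before being accepted.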
if __name__ == "__main__":
print(f'''{solution() = }''')
| 704
|
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 56
| 0
|
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)
    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
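# Illustrative example for fix_query_key_value_ordering (comment only): a Megatron
# v1.0 QKV weight with num_splits=3, num_heads=2, hidden_size=4 is stored as
# [num_heads * hidden_size * num_splits, :] = [24, :]; the view/transpose pair above
# permutes it to the [num_splits * num_heads * hidden_size, :] layout expected by the
# rest of this script (and by Megatron >= 2.0 checkpoints).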
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}
    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)
    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0
    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]
    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings
    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match")
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings
    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }
    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)
        # Stop if that's not a layer
        if m is None:
            break
        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)
        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"
        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val
        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions)
            output_state_dict[layer_name + ".attn.bias"] = causal_mask
            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val
        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val
        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)
        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val
    # DEBUG.
    assert config.n_layer == layer_idx + 1
    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]
    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings
    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint", type=str, help="Path to the checkpoint file (.zip archive or direct .pt file)")
    parser.add_argument(
        "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model.")
    args = parser.parse_args()
    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)
    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")
    ds_args = input_state_dict.get("args", None)
    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"
        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257, n_positions=1024, n_embd=1024, n_layer=24, n_head=16, n_inner=4096, activation_function=activation_function, resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256)
    else:
        config = GPT2Config.from_json_file(args.config_file)
    config.architectures = ["GPT2LMHeadModel"]
    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)
    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)
    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class
    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)
    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)
    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
| 610
|
"""simple docstring"""
def base16_encode(data: bytes) -> str:
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])
def base16_decode(data: str) -> bytes:
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits.")
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.")
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
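# Example round trip (values checked by hand against the ASCII table):
#   >>> base16_encode(b"Hello World!")
#   '48656C6C6F20576F726C6421'
#   >>> base16_decode("48656C6C6F20576F726C6421")
#   b'Hello World!'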
if __name__ == "__main__":
import doctest
doctest.testmod()
| 610
| 1
|
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"
@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)
    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)
    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)
    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)
    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)
    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
| 329
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
lowerCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    model_type = "upernet"
    def __init__(self, backbone_config=None, hidden_size=512, initializer_range=0.02, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=384, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
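# Minimal usage sketch (a comment-only example; the default ResNet backbone is built
# automatically when no backbone_config is passed):
#   >>> config = UperNetConfig()
#   >>> config.backbone_config.model_type
#   'resnet'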
| 329
| 1
|
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self):
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()
    def test_str(self):
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")
    def test_size(self):
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)
    def test_euclidean_length(self):
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)
    def test_add(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)
    def test_sub(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)
    def test_mul(self):
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)
    def test_zero_vector(self):
        self.assertEqual(str(zero_vector(10)).count("0"), 10)
    def test_unit_basis_vector(self):
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")
    def test_axpy(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")
    def test_copy(self):
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))
    def test_change_component(self):
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")
    def test_str_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))
    def test_minor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))
    def test_cofactor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))
    def test_determinant(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())
    def test__mul__matrix(self):
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))
    def test_change_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))
    def test_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)
    def test__add__matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))
    def test__sub__matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))
    def test_square_zero_matrix(self):
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", str(square_zero_matrix(5)))
if __name__ == "__main__":
unittest.main()
| 562
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size)
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4])
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 562
| 1
|
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
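# Usage sketch (file names hypothetical) -- fire exposes calculate_rouge_path as a CLI:
#   python rouge_cli.py preds.txt targets.txt --save_path metrics.json
# Any extra flags are forwarded to calculate_rouge via **kwargs.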
| 712
|
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """
    Sieve of Eratosthenes: find all primes up to and including num.

    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
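# Example run: entering 30 prints [2, 3, 5, 7, 11, 13, 17, 19, 23, 29].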
| 336
| 0
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4e_00 and cp <= 0x9f_ff)
or (cp >= 0x34_00 and cp <= 0x4d_bf) #
or (cp >= 0x2_00_00 and cp <= 0x2_a6_df) #
or (cp >= 0x2_a7_00 and cp <= 0x2_b7_3f) #
or (cp >= 0x2_b7_40 and cp <= 0x2_b8_1f) #
or (cp >= 0x2_b8_20 and cp <= 0x2_ce_af) #
or (cp >= 0xf9_00 and cp <= 0xfa_ff)
or (cp >= 0x2_f8_00 and cp <= 0x2_fa_1f) #
): #
return True
return False
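# Example: _is_chinese_char(ord("中")) is True (U+4E2D lies in the main CJK block 0x4E00-0x9FFF),
# while _is_chinese_char(ord("A")) is False.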
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
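# Worked example: with bert_tokens ["中", "国", "人"] and chinese_word_set {"中国"}, the longest
# match "中国" is found at position 0, so the result is ["中", "##国", "人"] -- only the
# non-initial characters of a matched whole word receive the "##" prefix.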
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
    args = parser.parse_args()
main(args)
| 67
|
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
_lowercase = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
_lowercase = Accelerator(kwargs_handlers=[ddp_scaler])
_lowercase = torch.nn.Linear(100, 200)
_lowercase = accelerator.prepare(model)
# Check the values changed in kwargs
_lowercase = ""
_lowercase = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
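# Note: a handler only serializes fields that differ from the dataclass defaults, e.g.
# DistributedDataParallelKwargs(find_unused_parameters=True).to_kwargs() returns
# {"find_unused_parameters": True}, which Accelerator then forwards to torch's DDP wrapper.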
| 632
| 0
|
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """
    Helper function to read an audio file through ffmpeg.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
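# Minimal usage sketch (file name hypothetical; requires the ffmpeg binary on PATH):
#   with open("clip.mp3", "rb") as f:
#       audio = ffmpeg_read(f.read(), sampling_rate=16_000)
#   # `audio` is a mono float32 numpy array resampled to 16 kHz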
def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """
    Helper function to read raw microphone data through ffmpeg.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """
    Helper function to read audio from the microphone in overlapping, strided chunks.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """
    Reads raw bytes from an iterator and does chunks of length `chunk_len`. Optionally adds `stride` to each chunk to
    get overlaps. `stream` is used to return partial results even if a full `chunk_len` is not yet available.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
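# Worked example (stream=False): with chunk_len=6, stride=(2, 1) and a single payload b"abcdefgh",
# the generator yields {"raw": b"abcdef", "stride": (0, 1)} (the first chunk keeps no left stride),
# then trims the accumulator to acc[6 - 2 - 1:] == b"defgh" and flushes it as the last chunk
# {"raw": b"defgh", "stride": (2, 0)}.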
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """
    Internal function to create the generator of data through ffmpeg.
    """
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 713
|
from timeit import timeit
test_data = {
'''MALAYALAM''': True,
'''String''': False,
'''rotor''': True,
'''level''': True,
'''A''': True,
'''BB''': True,
'''ABC''': False,
'''amanaplanacanalpanama''': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
# We need to traverse till half of the length of string
# as we can get access of the i'th last element from
# i'th index.
# eg: [0,1,2,3,4,5] => 4th index can be accessed
# with the help of 1st index (i==n-i-1)
# where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))
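# e.g. for s = "level" (n = 5) only i = 0 and i = 1 are checked:
# s[0] == s[4] and s[1] == s[3]; the middle character needs no comparison.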
def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False
def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]
def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f"""{key:21} {value}""")
print('''a man a plan a canal panama''')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('''is_palindrome_slice''')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('''is_palindrome''')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('''is_palindrome_recursive''')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('''is_palindrome_traversal''')
| 519
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}
class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # add one for start of sentence (sos) token
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
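# Usage sketch (processor name hypothetical): with an ImageGPT image processor as `preprocessor`,
# ImageGPTOnnxConfig(ImageGPTConfig()).generate_dummy_inputs(preprocessor, batch_size=2) returns
# {"input_ids": ...} holding two sequences of 32 * 32 = 1024 color-cluster ids.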
| 12
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
            clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="np",
        )

        image = output.images

        assert image.shape == (1, 512, 768, 3)
| 474
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
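# Sanity check on the falcon-7b style defaults above: head_dim == 4544 // 71 == 64,
# and with alibi=False the `rotary` property is True, i.e. rotary position embeddings are used.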
| 142
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 218
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
    "tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
        "LxmertEncoder",
        "LxmertForPreTraining",
        "LxmertForQuestionAnswering",
        "LxmertModel",
        "LxmertPreTrainedModel",
        "LxmertVisualFeatureEncoder",
        "LxmertXLayer",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
        "TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLxmertForPreTraining",
        "TFLxmertMainLayer",
        "TFLxmertModel",
        "TFLxmertPreTrainedModel",
        "TFLxmertVisualFeatureEncoder",
    ]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 218
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""]
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    from .modeling_mt5 import (
        MT5EncoderModel,
        MT5ForConditionalGeneration,
        MT5ForQuestionAnswering,
        MT5Model,
        MT5PreTrainedModel,
        MT5Stack,
    )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
import sys
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
| 707
|
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False,
            initializer_range=self.initializer_range, encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as DeiT does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f'''Testing {model_class} with {problem_type["title"]}'''):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        r"""
        A small test to make sure that inference works in half precision without any problem.
        """
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 531
| 0
|
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """
    Create all n-grams of size ngram_size from the characters of sentence.

    >>> create_ngram("I am an NLPer", 2)[:3]
    ['I ', ' a', 'am']
    """
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 50
|
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()

DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 552
| 0
|
"""simple docstring"""
def snake_case_ ( A_ : int = 10, A_ : int = 22 ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = range(1, A_ )
_lowerCamelCase : Dict = range(1, A_ )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(F"""{solution(10, 22) = }""")
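# Example of a counted pair: base 9, power 5 -> 9**5 == 59049, which has exactly 5 digits.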
| 598
|
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 598
| 1
|
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=True , )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )['hidden_states'][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1E-3))
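    # Note: the check above asserts that decoding with a cached prefix (past_key_values)
    # reproduces the hidden states of a full forward pass over the concatenated sequence.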
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': FalconModel,
            'text-classification': FalconForSequenceClassification,
            'text-generation': FalconForCausalLM,
            'question-answering': FalconForQuestionAnswering,
            'token-classification': FalconForTokenClassification,
            'zero-shot': FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_position_embedding_types(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)
    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_rw_cache_conversion(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict['input_ids']
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(rw_cache)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx]))
    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_past_key_values_format(self):
        # Falcon can use multi-query attention, so the KV-head count may differ from the query-head count.
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
            # If it doesn't support cache, pass the test
            if not hasattr(config, 'use_cache'):
                return
            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs['use_cache'] = True
            outputs = model(**inputs)
            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return
            num_hidden_layers = (
                getattr(config, 'decoder_layers', None)
                or getattr(config, 'num_decoder_layers', None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, 'num_kv_heads', config.num_attention_heads)
            embed_dim = getattr(config, 'd_model', config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads
            past_kv = outputs['past_key_values']
            self.assertEqual(len(past_kv), num_hidden_layers)
            batch_size, seq_length = inputs['input_ids'].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained('Rocketknight1/falcon-rw-1b')
        model = FalconForCausalLM.from_pretrained('Rocketknight1/falcon-rw-1b')
        model.eval()
        model.to(torch_device)
        inputs = tokenizer('My favorite food is', return_tensors='pt').to(torch_device)
        EXPECTED_OUTPUT = (
            'My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'
        )
        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]
        self.assertEqual(output_str, EXPECTED_OUTPUT)

    @slow
    def test_lm_generation_big_models(self):
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer('My favorite food is', return_tensors='pt').to(torch_device)
            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)

    @slow
    def test_lm_generation_use_cache(self):
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer('My favorite food is', return_tensors='pt').to(torch_device)
                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
| 352
|
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)
    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)
    visited = []
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            layer_infos = key.split('.')[0].split(LORA_PREFIX_TEXT_ENCODER + '_')[-1].split('_')
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split('.')[0].split(LORA_PREFIX_UNET + '_')[-1].split('_')
            curr_layer = pipeline.unet
        # find the target layer by walking attribute names, re-joining name pieces that
        # were split on underscores until the lookup succeeds
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace('lora_down', 'lora_up'))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace('lora_up', 'lora_down'))
        # update weight: W += alpha * (up @ down)
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)
        # update visited list
        for item in pair_keys:
            visited.append(item)
    return pipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    args = parser.parse_args()
    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha
    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
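# Example invocation (script name and all paths below are placeholders):
#   python this_script.py --base_model_path runwayml/stable-diffusion-v1-5 \
#       --checkpoint_path lora_weights.safetensors --dump_path ./converted-pipeline --alpha 0.75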
| 352
| 1
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class snake_case_( unittest.TestCase ):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split(),
                encoding='utf-8',
                check=True,
            )
        assert hasattr(self, 'env')
    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            'enabled': True,
            'processes_per_host': 8,
        }
        smp_options = {
            'enabled': True,
            'parameters': {
                'microbatches': 4,
                'placement_strategy': 'spread',
                'pipeline': 'interleaved',
                'optimize': 'speed',
                'partitions': 4,
                'ddp': True,
            },
        }
        distribution = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
        name_extension = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'{self.env.base_job_name}-{instance_count}-smp-{name_extension}' , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={
                **self.env.hyperparameters,
                'model_name_or_path': self.model_name_or_path,
                'max_steps': 500,
            } , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version='py36' , )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv')

    @parameterized.expand([(1,)])
    def test_glue(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds', 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy)
        assert all(t <= self.results['eval_loss'] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f'{estimator.latest_training_job.name}.json', 'w') as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss}, outfile)
| 637
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )['hidden_states'][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1E-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': LlamaModel,
            'text-classification': LlamaForSequenceClassification,
            'text-generation': LlamaForCausalLM,
            'zero-shot': LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip('LLaMA buffers include complex numbers, which breaks this test')
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([('linear',), ('dynamic',)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1E-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1E-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1E-5))
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!')
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf', device_map='auto')
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1E-2, rtol=1E-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1E-5, rtol=1E-5)
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!')
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf', device_map='auto')
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1E-2, rtol=1E-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1E-5, rtol=1E-5)
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!')
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf', device_map='auto')
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1E-2, rtol=1E-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513])
        # fmt: on
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1E-2, rtol=1E-2)
    @unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test')
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf', device_map='auto')
        out = model(torch.tensor(input_ids))
        EXPECTED_MEAN = torch.tensor(
            [[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]], dtype=torch.float32)
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1E-2, rtol=1E-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1E-5, rtol=1E-5)
    @unittest.skip('Model is currently gated')
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
        prompt = 'Simply put, the theory of relativity states that '
        tokenizer = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf')
        input_ids = tokenizer.encode(prompt, return_tensors='pt')
        model = LlamaForCausalLM.from_pretrained(
            'meta-llama/Llama-2-13b-chat-hf', device_map='sequential', use_safetensors=False)
        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
| 637
| 1
|
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    """Probabilistic Miller-Rabin primality test using `prec` random witness rounds."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
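# With prec independent rounds, a composite n is falsely reported prime with
# probability at most 4**(-prec).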
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
print("Here's the list of primes:")
print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 68
|
"""simple docstring"""
def solution(n: int = 2000000) -> int:
    '''Return the sum of all primes below n, using a sieve of Eratosthenes.'''
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
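# Entries left at 0 mark primes; for example, solution(10) returns 2 + 3 + 5 + 7 = 17.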
if __name__ == "__main__":
print(f'''{solution() = }''')
| 177
| 0
|
'''simple docstring'''
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Read a binary file and return its contents as a string of bits."""
    result = ''
    try:
        with open(file_path, 'rb') as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print('File not accessible')
        sys.exit()
def decompress_data(data_bits: str) -> str:
    """Decompress an LZW-encoded bit stream back into the original bit string."""
    lexicon = {'0': '0', '1': '1'}
    result, curr_string = '', ''
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + '0'
        # when the dictionary size hits a power of two, codes grow by one bit
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex['0' + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + '1'
        index += 1
        curr_string = ''
    return result
def write_file_binary(file_path: str, to_write: str) -> None:
    """Pack a bit string into bytes (with padding) and write it to a binary file."""
    byte_length = 8
    try:
        with open(file_path, 'wb') as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append('10000000')
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder='big'))
    except OSError:
        print('File not accessible')
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    """Strip the leading size prefix (zeros up to and including the first '1' bit)."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
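# Note: despite its name, `compress` performs decompression here: it reads the packed
# bit stream, strips the header prefix, rebuilds the data and writes it back out.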
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 715
|
'''simple docstring'''
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size: int = 13,
        image_size: int = 64,
        patch_size: int = 2,
        embed_dim: int = 3,
        num_channels: int = 3,
        is_training: bool = True,
        use_labels: bool = True,
        hidden_size: int = 128,
        hidden_sizes=[16, 32, 64, 128],
        num_hidden_layers: int = 7,
        num_attention_heads: int = 4,
        intermediate_size: int = 37,
        hidden_act: str = 'gelu',
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        type_sequence_label_size: int = 10,
        initializer_range: float = 0.02,
        encoder_stride: int = 2,
        num_attention_outputs: int = 1,
        dim: int = 128,
        depths: List[int] = [2, 2, 2, 2],
        resolution: int = 2,
        mlp_expansion_ratio: int = 2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFEfficientFormerModel,
            'image-classification': (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='EfficientFormer does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='EfficientFormer does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
def SCREAMING_SNAKE_CASE__ (self) -> Optional[int]:
"""simple docstring"""
def check_hidden_states_output(__a , __a , __a):
__snake_case : str = model_class(__a)
__snake_case : List[Any] = model(**self._prepare_for_class(__a , __a) , training=__a)
__snake_case : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__snake_case : Optional[Any] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(__a) , __a)
if hasattr(self.model_tester , 'encoder_seq_length'):
__snake_case : List[Any] = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , 'chunk_length') and self.model_tester.chunk_length > 1:
__snake_case : str = seq_length * self.model_tester.chunk_length
else:
__snake_case : Optional[int] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__snake_case : List[Any] = outputs.decoder_hidden_states
self.assertIsInstance(__a , (list, tuple))
self.assertEqual(len(__a) , __a)
__snake_case : List[str] = getattr(self.model_tester , 'seq_length' , __a)
__snake_case : Tuple = getattr(self.model_tester , 'decoder_seq_length' , __a)
self.assertListEqual(
list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , )
__snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[str] = True
check_hidden_states_output(__a , __a , __a)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : str = True
check_hidden_states_output(__a , __a , __a)
def SCREAMING_SNAKE_CASE__ (self , __a , __a , __a=False) -> int:
"""simple docstring"""
__snake_case : Optional[int] = super()._prepare_for_class(__a , __a , return_labels=__a)
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet')
def SCREAMING_SNAKE_CASE__ (self) -> Tuple:
"""simple docstring"""
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a)
def SCREAMING_SNAKE_CASE__ (self) -> int:
"""simple docstring"""
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a)
@slow
def SCREAMING_SNAKE_CASE__ (self) -> List[str]:
"""simple docstring"""
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Any = TFEfficientFormerModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def SCREAMING_SNAKE_CASE__ (self) -> Tuple:
"""simple docstring"""
__snake_case ,__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Tuple = True
__snake_case : Optional[Any] = getattr(self.model_tester , 'seq_length' , __a)
__snake_case : List[Any] = getattr(self.model_tester , 'encoder_seq_length' , __a)
__snake_case : Tuple = getattr(self.model_tester , 'key_length' , __a)
__snake_case : Optional[Any] = getattr(self.model_tester , 'chunk_length' , __a)
if chunk_length is not None and hasattr(self.model_tester , 'num_hashes'):
__snake_case : str = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__snake_case : Optional[Any] = True
__snake_case : Dict = False
__snake_case : Optional[int] = True
__snake_case : Dict = model_class(__a)
__snake_case : Tuple = model(**self._prepare_for_class(__a , __a) , training=__a)
__snake_case : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a) , self.model_tester.num_attention_outputs)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__snake_case : Dict = True
__snake_case : str = model_class(__a)
__snake_case : str = model(**self._prepare_for_class(__a , __a) , training=__a)
__snake_case : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a) , self.model_tester.num_attention_outputs)
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
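# Shape rationale for the two branches above (illustrative, assuming standard
# HF conventions): chunked attention (e.g. Reformer-style) carries an extra
# chunk axis, hence the 4-entry check [heads, seq, chunk, key]; dense attention
# reports 3 entries [heads, query_len, key_len] per layer.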
def SCREAMING_SNAKE_CASE__ (self) -> int:
"""simple docstring"""
__snake_case ,__snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__snake_case : Tuple = model_class(__a)
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__snake_case : Optional[Any] = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=__a)
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__snake_case : Tuple = model(__a)
self.assertTrue(outputs_dict is not None)
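# A minimal sketch of the idea exercised above (assumption: standard Keras
# functional API): symbolic tensors such as
#   tf.keras.Input(shape=(3, None, None), dtype=tf.float32)
# carry unknown (None) dimensions, so calling the model on them traces call()
# with fully dynamic shapes and surfaces conditionals that only work for
# concrete sizes.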
def _SCREAMING_SNAKE_CASE ( ) -> int:
"""simple docstring"""
__snake_case : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ (self) -> int:
"""simple docstring"""
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300')
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE__ (self) -> Union[str, Any]:
"""simple docstring"""
__snake_case : List[str] = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300')
__snake_case : Optional[int] = self.default_image_processor
__snake_case : List[Any] = prepare_img()
__snake_case : List[Any] = image_processor(images=__a , return_tensors='tf')
# forward pass
__snake_case : List[str] = model(**__a , training=__a)
# verify the logits
__snake_case : str = tf.TensorShape((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , __a)
__snake_case : Any = tf.constant([-0.0_555, 0.4_825, -0.0_852])
self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4))
@slow
def SCREAMING_SNAKE_CASE__ (self) -> List[Any]:
"""simple docstring"""
__snake_case : List[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'snap-research/efficientformer-l1-300')
__snake_case : List[Any] = self.default_image_processor
__snake_case : Union[str, Any] = prepare_img()
__snake_case : List[Any] = image_processor(images=__a , return_tensors='tf')
# forward pass
__snake_case : Optional[int] = model(**__a , training=__a)
# verify the logits
__snake_case : Optional[int] = tf.TensorShape((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , __a)
__snake_case : List[str] = tf.constant([-0.1_312, 0.4_353, -1.0_499])
self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4))
| 61
| 0
|
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = LayoutLMTokenizer
snake_case_ = LayoutLMTokenizerFast
snake_case_ = True
snake_case_ = True
def UpperCamelCase_ ( self : Any ):
super().setUp()
__A = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
__A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def UpperCamelCase_ ( self : Tuple ,**A : int ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname ,**A )
def UpperCamelCase_ ( self : Optional[Any] ,A : Any ):
__A = "UNwant\u00E9d,running"
__A = "unwanted, running"
return input_text, output_text
def UpperCamelCase_ ( self : str ):
__A = self.tokenizer_class(self.vocab_file )
__A = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(A ,["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) ,[7, 4, 5, 10, 8, 9] )
def UpperCamelCase_ ( self : int ):
pass
| 55
|
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
lowerCamelCase = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def a_ ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
for attribute in key.split('.' ):
_lowerCamelCase : Optional[Any] =getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if weight_type is not None:
_lowerCamelCase : str =getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).shape
else:
_lowerCamelCase : Optional[Any] =hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
_lowerCamelCase : Tuple =value
elif weight_type == "weight_g":
_lowerCamelCase : Any =value
elif weight_type == "weight_v":
_lowerCamelCase : Any =value
elif weight_type == "bias":
_lowerCamelCase : Dict =value
else:
_lowerCamelCase : Union[str, Any] =value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
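# Walk-through of the setter above (hypothetical key, for orientation only):
# for key="encoder.layers.0.attention.k_proj" and weight_type="weight", the
# getattr loop descends attribute by attribute to the k_proj module, the shape
# of its weight is checked against the incoming fairseq tensor, and the
# matching branch assigns the value.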
def a_ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] =[]
_lowerCamelCase : Optional[Any] =fairseq_model.state_dict()
_lowerCamelCase : Tuple =hf_model.feature_extractor
_lowerCamelCase : int =hf_model.adapter
for name, value in fairseq_dict.items():
_lowerCamelCase : Optional[Any] =False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hf_model.config.feat_extract_norm == 'group' , )
_lowerCamelCase : Optional[int] =True
elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ):
load_adapter(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_lowerCamelCase : int =True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
_lowerCamelCase : Dict =True
if "*" in mapped_key:
_lowerCamelCase : List[str] =name.split(SCREAMING_SNAKE_CASE__ )[0].split('.' )[-2]
_lowerCamelCase : Dict =mapped_key.replace('*' , SCREAMING_SNAKE_CASE__ )
if "weight_g" in name:
_lowerCamelCase : List[Any] ='weight_g'
elif "weight_v" in name:
_lowerCamelCase : int ='weight_v'
elif "bias" in name:
_lowerCamelCase : int ='bias'
elif "weight" in name:
_lowerCamelCase : Optional[int] ='weight'
else:
_lowerCamelCase : Any =None
set_recursively(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def a_ ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
_lowerCamelCase : str =full_name.split('conv_layers.' )[-1]
_lowerCamelCase : List[Any] =name.split('.' )
_lowerCamelCase : Optional[int] =int(items[0] )
_lowerCamelCase : List[Any] =int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
_lowerCamelCase : Any =value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
_lowerCamelCase : int =value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'''
)
_lowerCamelCase : List[Any] =value
logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
)
_lowerCamelCase : Tuple =value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
def a_ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[int] =full_name.split('adaptor.' )[-1]
_lowerCamelCase : str =name.split('.' )
if items[1].isdigit():
_lowerCamelCase : Any =int(items[1] )
else:
_lowerCamelCase : int =None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
_lowerCamelCase : str =value
logger.info(F'''Adapter proj layer norm bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
_lowerCamelCase : str =value
logger.info(F'''Adapter proj layer norm weight was initialized from {full_name}.''' )
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
_lowerCamelCase : Optional[Any] =value
logger.info(F'''Adapter proj layer bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
_lowerCamelCase : Optional[Any] =value
logger.info(F'''Adapter proj layer weight was initialized from {full_name}.''' )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
_lowerCamelCase : List[Any] =value
logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
_lowerCamelCase : Dict =value
logger.info(F'''Adapter layer {layer_id} weight was initialized from {full_name}.''' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
def a_ ( SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
_lowerCamelCase , _lowerCamelCase : List[Any] =emb.weight.shape
_lowerCamelCase : str =nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ )
_lowerCamelCase : Optional[Any] =emb.weight.data
return lin_layer
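# What the helper above constructs (a sketch; the original passes bias=False):
# given an nn.Embedding whose weight is (vocab_size, emb_size), it returns a
# bias-free nn.Linear sharing the same weight data, i.e. an output projection
# tied to the input embeddings.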
@torch.no_grad()
def a_ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , ):
'''simple docstring'''
_lowerCamelCase : int =WavaVecaConfig.from_pretrained(
SCREAMING_SNAKE_CASE__ , add_adapter=SCREAMING_SNAKE_CASE__ , adapter_stride=SCREAMING_SNAKE_CASE__ , adapter_kernel_size=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , output_hidden_size=SCREAMING_SNAKE_CASE__ , )
_lowerCamelCase : str =MBartConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
# load model
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : str =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
'config_yaml': config_yaml_path,
'data': '/'.join(dict_path.split('/' )[:-1] ),
'w2v_path': checkpoint_path,
'load_pretrained_decoder_from': None,
} , )
_lowerCamelCase : Any =model[0].eval()
# load feature extractor
_lowerCamelCase : List[Any] =WavaVecaFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ )
# set weights for wav2vec2 encoder
_lowerCamelCase : List[Any] =WavaVecaModel(SCREAMING_SNAKE_CASE__ )
recursively_load_weights_wavaveca(model.encoder , SCREAMING_SNAKE_CASE__ )
# load decoder weights
_lowerCamelCase : int =MBartForCausalLM(SCREAMING_SNAKE_CASE__ )
_lowerCamelCase , _lowerCamelCase : Union[str, Any] =hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=SCREAMING_SNAKE_CASE__ )
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
_lowerCamelCase : Dict =SpeechEncoderDecoderModel(encoder=SCREAMING_SNAKE_CASE__ , decoder=SCREAMING_SNAKE_CASE__ )
_lowerCamelCase : List[Any] =False
_lowerCamelCase : Any =MBartaaTokenizer(SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
_lowerCamelCase : int =hf_wavavec.config.to_dict()
_lowerCamelCase : Optional[int] =tokenizer.pad_token_id
_lowerCamelCase : Tuple =tokenizer.bos_token_id
_lowerCamelCase : Any =tokenizer.eos_token_id
_lowerCamelCase : int ='mbart50'
_lowerCamelCase : Tuple ='wav2vec2'
_lowerCamelCase : Tuple =tokenizer.eos_token_id
_lowerCamelCase : Dict =250_004
_lowerCamelCase : List[str] =tokenizer.eos_token_id
_lowerCamelCase : Dict =SpeechEncoderDecoderConfig.from_dict(SCREAMING_SNAKE_CASE__ )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE__ )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-xls-r-1b',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/mbart-large-50-one-to-many-mmt',
type=str,
help='Path to hf decoder checkpoint config',
)
parser.add_argument('--add_adapter', default=True, type=bool, help='whether to add model adapter layers')
parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers')
parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers')
parser.add_argument('--encoder_output_dim', default=10_24, type=int, help='encoder output dim')
parser.add_argument('--start_token_id', default=25_00_04, type=int, help='`decoder_start_token_id` of model config')
lowerCamelCase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 464
| 0
|
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_json_file(lowercase__ )
print(F'''Building PyTorch model from configuration: {config}''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = BertForPreTraining(lowercase__ )
# Load weights from tf checkpoint
load_tf_weights_in_bert(lowercase__ , lowercase__ , lowercase__ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , lowercase__ )
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCAmelCase : Union[str, Any] =parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 703
|
def _UpperCamelCase ( lowercase__ ):
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(lowercase__ ) )
if txt[a].isalpha()
]
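# Illustrative behavior (inferred from the comprehension above): for "abc"
# this returns ["Abc", "aBc", "abC"], one variant per alphabetic position with
# that single character upper-cased; non-alphabetic positions are skipped.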
if __name__ == "__main__":
__import__('doctest').testmod()
| 260
| 0
|
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=4 , ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = parent
lowercase__ : Any = batch_size
lowercase__ : Union[str, Any] = seq_length
lowercase__ : Dict = is_training
lowercase__ : str = use_attention_mask
lowercase__ : Optional[Any] = use_token_type_ids
lowercase__ : List[str] = use_labels
lowercase__ : Union[str, Any] = vocab_size
lowercase__ : Union[str, Any] = hidden_size
lowercase__ : Optional[int] = num_hidden_layers
lowercase__ : Dict = num_attention_heads
lowercase__ : Tuple = intermediate_size
lowercase__ : Any = hidden_act
lowercase__ : Optional[Any] = hidden_dropout_prob
lowercase__ : List[str] = attention_probs_dropout_prob
lowercase__ : List[str] = max_position_embeddings
lowercase__ : Optional[int] = type_vocab_size
lowercase__ : Optional[int] = type_sequence_label_size
lowercase__ : str = initializer_range
lowercase__ : int = num_choices
def __a ( self ) -> str:
"""simple docstring"""
lowercase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : Dict = None
if self.use_attention_mask:
lowercase__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : Dict = None
if self.use_token_type_ids:
lowercase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ : str = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __a ( self ) -> Dict:
"""simple docstring"""
lowercase__ : Optional[int] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Optional[Any] = config_and_inputs
lowercase__ : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase( snake_case_ , unittest.TestCase ):
"""simple docstring"""
a : int = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def __a ( self ) -> int:
"""simple docstring"""
lowercase__ : Any = FlaxAlbertModelTester(self )
@slow
def __a ( self ) -> Optional[int]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowercase__ : Optional[Any] = model_class_name.from_pretrained("albert-base-v2" )
lowercase__ : str = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase )
@require_flax
class UpperCAmelCase( unittest.TestCase ):
"""simple docstring"""
@slow
def __a ( self ) -> Dict:
"""simple docstring"""
lowercase__ : Optional[int] = FlaxAlbertModel.from_pretrained("albert-base-v2" )
lowercase__ : Optional[Any] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowercase__ : List[str] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowercase__ : List[Any] = model(lowerCamelCase , attention_mask=lowerCamelCase )[0]
lowercase__ : int = (1, 11, 768)
self.assertEqual(output.shape , lowerCamelCase )
lowercase__ : Tuple = np.array(
[[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCamelCase , atol=1E-4 ) )
| 397
|
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class UpperCAmelCase( snake_case_ ):
"""simple docstring"""
a : torch.FloatTensor
a : Optional[torch.FloatTensor] = None
def snake_case_ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=0.999 ,SCREAMING_SNAKE_CASE_="cosine" ,) -> str:
if alpha_transform_type == "cosine":
def alpha_bar_fn(SCREAMING_SNAKE_CASE_ ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(SCREAMING_SNAKE_CASE_ ):
return math.exp(t * -12.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
lowercase__ : List[str] = []
for i in range(SCREAMING_SNAKE_CASE_ ):
lowercase__ : int = i / num_diffusion_timesteps
lowercase__ : Union[str, Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(SCREAMING_SNAKE_CASE_ ) / alpha_bar_fn(SCREAMING_SNAKE_CASE_ ) ,SCREAMING_SNAKE_CASE_ ) )
return torch.tensor(SCREAMING_SNAKE_CASE_ ,dtype=torch.floataa )
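# Reading of the schedule builder above (a sketch, not additional behavior):
# beta_i = min(1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), max_beta) for
# each of the T diffusion timesteps, with max_beta defaulting to 0.999; with
# the "cosine" transform the betas grow monotonically and are returned as a
# float32 tensor.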
class UpperCAmelCase( snake_case_ , snake_case_ ):
"""simple docstring"""
a : int = 1
@register_to_config
def __init__( self , lowerCamelCase = 1000 , lowerCamelCase = 0.00_01 , lowerCamelCase = 0.02 , lowerCamelCase = "linear" , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = True , lowerCamelCase = 0 , lowerCamelCase = "epsilon" , lowerCamelCase = 1.0 , **lowerCamelCase , ) -> List[Any]:
"""simple docstring"""
if kwargs.get("set_alpha_to_one" , lowerCamelCase ) is not None:
lowercase__ : Any = (
"The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
)
deprecate("set_alpha_to_one" , "1.0.0" , lowerCamelCase , standard_warn=lowerCamelCase )
lowercase__ : Optional[int] = kwargs["set_alpha_to_one"]
if trained_betas is not None:
lowercase__ : Optional[int] = torch.tensor(lowerCamelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
lowercase__ : int = torch.linspace(lowerCamelCase , lowerCamelCase , lowerCamelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowercase__ : List[str] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCamelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowercase__ : Optional[Any] = betas_for_alpha_bar(lowerCamelCase )
else:
raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" )
lowercase__ : List[str] = 1.0 - self.betas
lowercase__ : List[str] = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just outputs the predicted noise
# or whether we use the final alpha of the "non-previous" one.
lowercase__ : Dict = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
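# Concretely: the last inverted step would need alphas_cumprod at an index one
# past the trained schedule, which does not exist; set_alpha_to_zero=True
# substitutes 0.0 there (so the step reduces to the predicted noise),
# otherwise the final trained alpha is reused.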
# standard deviation of the initial noise distribution
lowercase__ : Optional[int] = 1.0
# setable values
lowercase__ : int = None
lowercase__ : int = torch.from_numpy(np.arange(0 , lowerCamelCase ).copy().astype(np.intaa ) )
def __a ( self , lowerCamelCase , lowerCamelCase = None ) -> torch.FloatTensor:
"""simple docstring"""
return sample
def __a ( self , lowerCamelCase , lowerCamelCase = None ) -> Optional[Any]:
"""simple docstring"""
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
f"""`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"""
f""" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"""
f""" maximal {self.config.num_train_timesteps} timesteps.""" )
lowercase__ : Optional[Any] = num_inference_steps
lowercase__ : Dict = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowercase__ : Dict = (np.arange(0 , lowerCamelCase ) * step_ratio).round().copy().astype(np.intaa )
lowercase__ : Any = torch.from_numpy(lowerCamelCase ).to(lowerCamelCase )
self.timesteps += self.config.steps_offset
def __a ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = 0.0 , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
"""simple docstring"""
lowercase__ : int = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
lowercase__ : Optional[int] = self.alphas_cumprod[timestep]
lowercase__ : Dict = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
lowercase__ : Optional[int] = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
lowercase__ : Dict = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
lowercase__ : Optional[int] = model_output
elif self.config.prediction_type == "sample":
lowercase__ : Any = model_output
lowercase__ : Optional[int] = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
lowercase__ : List[Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
lowercase__ : Any = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"""
" `v_prediction`" )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
lowercase__ : Optional[Any] = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowercase__ : int = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowercase__ : Dict = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=lowerCamelCase , pred_original_sample=lowerCamelCase )
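# For reference, the update assembled above is DDIM Eq. (12) with eta = 0:
#   x_next = sqrt(alpha_prod_t_prev) * x0_pred
#            + sqrt(1 - alpha_prod_t_prev) * eps_pred
# where x0_pred and eps_pred are derived from model_output according to the
# configured prediction_type.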
def __len__( self ) -> Tuple:
"""simple docstring"""
return self.config.num_train_timesteps
| 397
| 1
|
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowercase ( A__ ):
'''simple docstring'''
def __init__( self , _snake_case , _snake_case , _snake_case ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=_snake_case , unet=_snake_case , scheduler=_snake_case )
@torch.no_grad()
def __call__( self , _snake_case = 1 , _snake_case = None , _snake_case = 0.0 , _snake_case = 50 , _snake_case = "pil" , _snake_case = True , **_snake_case , ) -> Union[Tuple, ImagePipelineOutput]:
"""simple docstring"""
UpperCAmelCase = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=_snake_case , )
UpperCAmelCase = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(_snake_case )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
UpperCAmelCase = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase = {}
if accepts_eta:
UpperCAmelCase = eta
for t in self.progress_bar(self.scheduler.timesteps ):
UpperCAmelCase = self.scheduler.scale_model_input(_snake_case , _snake_case )
# predict the noise residual
UpperCAmelCase = self.unet(_snake_case , _snake_case ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase = self.scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
# decode the image latents with the VAE
UpperCAmelCase = self.vqvae.decode(_snake_case ).sample
UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase = self.numpy_to_pil(_snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_snake_case )
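# Note on the signature inspection above: `eta` is only accepted by DDIM-style
# schedulers, so the pipeline checks scheduler.step's parameters before passing
# it; forwarding eta unconditionally would raise a TypeError for schedulers
# without that argument.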
| 391
|
def _lowerCAmelCase ( A__: int , A__: int ):
'''simple docstring'''
if number < 0 or shift_amount < 0:
raise ValueError('''both inputs must be positive integers''' )
UpperCAmelCase = str(bin(A__ ) )
binary_number += "0" * shift_amount
return binary_number
def _lowerCAmelCase ( A__: int , A__: int ):
'''simple docstring'''
if number < 0 or shift_amount < 0:
raise ValueError('''both inputs must be positive integers''' )
UpperCAmelCase = str(bin(A__ ) )[2:]
if shift_amount >= len(A__ ):
return "0b0"
UpperCAmelCase = binary_number[: len(A__ ) - shift_amount]
return "0b" + shifted_binary_number
def _lowerCAmelCase ( A__: int , A__: int ):
'''simple docstring'''
if number >= 0: # Get binary representation of positive number
UpperCAmelCase = '''0''' + str(bin(A__ ) ).strip('''-''' )[2:]
else: # Get binary (2's complement) representation of negative number
UpperCAmelCase = len(bin(A__ )[3:] ) # Find 2's complement of number
UpperCAmelCase = bin(abs(A__ ) - (1 << binary_number_length) )[3:]
UpperCAmelCase = (
'''1''' + '''0''' * (binary_number_length - len(A__ )) + binary_number
)
if shift_amount >= len(A__ ):
return "0b" + binary_number[0] * len(A__ )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(A__ ) - shift_amount]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
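# Worked example for the third helper above, the arithmetic right shift
# (values checked by hand): for number=-8 and shift_amount=1, the number is
# padded to the 5-bit two's complement string "11000", then a copy of the sign
# bit is shifted in, returning "0b11100", i.e. -4, matching Python's -8 >> 1.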
| 391
| 1
|
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
__a : int = get_tests_dir("fixtures/test_sentencepiece.model")
__a : str = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
__a : Optional[int] = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class __lowercase ( lowercase_ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = CamembertTokenizer
SCREAMING_SNAKE_CASE = CamembertTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
__A = CamembertTokenizer(UpperCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase_ ( self : Optional[int] ):
"""simple docstring"""
__A = """<pad>"""
__A = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
__A = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(UpperCamelCase_ ) , 1_004 )
def lowerCAmelCase_ ( self : int ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_005 )
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
__A = CamembertTokenizer(UpperCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
__A = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
__A = """I was born in 92000, and this is falsé."""
__A = tokenizer.encode(UpperCamelCase_ )
__A = rust_tokenizer.encode(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__A = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
__A = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
__A = tokenizer.convert_ids_to_tokens(UpperCamelCase_ )
__A = rust_tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__A = self.get_tokenizer()
__A = self.get_rust_tokenizer()
__A = """I was born in 92000, and this is falsé."""
__A = tokenizer.tokenize(UpperCamelCase_ )
__A = rust_tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__A = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
__A = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
__A = self.get_rust_tokenizer()
__A = tokenizer.encode(UpperCamelCase_ )
__A = rust_tokenizer.encode(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
@slow
def lowerCAmelCase_ ( self : List[str] ):
"""simple docstring"""
__A = {"""input_ids""": [[5, 54, 7_196, 297, 30, 23, 776, 18, 11, 3_215, 3_705, 8_252, 22, 3_164, 1_181, 2_116, 29, 16, 813, 25, 791, 3_314, 20, 3_446, 38, 27_575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9_088, 20, 1_517, 8, 22_804, 18_818, 10, 38, 629, 607, 607, 142, 19, 7_196, 867, 56, 10_326, 24, 2_267, 20, 416, 5_072, 15_612, 233, 734, 7, 2_399, 27, 16, 3_015, 1_649, 7, 24, 20, 4_338, 2_399, 27, 13, 3_400, 14, 13, 6_189, 8, 930, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
__A = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase_ , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=UpperCamelCase_ , )
| 637
|
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
__a : Union[str, Any] = logging.get_logger(__name__)
__a : Tuple = "Hello world! cécé herlolip"
def _SCREAMING_SNAKE_CASE ( __lowercase : str , __lowercase : str , __lowercase : bool ) -> List[Any]:
"""simple docstring"""
__A = FairseqRobertaModel.from_pretrained(__lowercase )
roberta.eval() # disable dropout
__A = roberta.model.encoder.sentence_encoder
__A = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
__A = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our RoBERTa config:""" , __lowercase )
__A = XLMRobertaXLForSequenceClassification(__lowercase ) if classification_head else XLMRobertaXLForMaskedLM(__lowercase )
model.eval()
# Now let's copy all the weights.
# Embeddings
__A = roberta_sent_encoder.embed_tokens.weight
__A = roberta_sent_encoder.embed_positions.weight
__A = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
__A = roberta_sent_encoder.layer_norm.weight
__A = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__A = model.roberta.encoder.layer[i]
__A = roberta_sent_encoder.layers[i]
__A = layer.attention
__A = roberta_layer.self_attn_layer_norm.weight
__A = roberta_layer.self_attn_layer_norm.bias
# self attention
__A = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
__A = roberta_layer.self_attn.q_proj.weight
__A = roberta_layer.self_attn.q_proj.bias
__A = roberta_layer.self_attn.k_proj.weight
__A = roberta_layer.self_attn.k_proj.bias
__A = roberta_layer.self_attn.v_proj.weight
__A = roberta_layer.self_attn.v_proj.bias
# self-attention output
__A = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
__A = roberta_layer.self_attn.out_proj.weight
__A = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
__A = roberta_layer.final_layer_norm.weight
__A = roberta_layer.final_layer_norm.bias
# intermediate
__A = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
__A = roberta_layer.fca.weight
__A = roberta_layer.fca.bias
# output
__A = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
__A = roberta_layer.fca.weight
__A = roberta_layer.fca.bias
# end of layer
if classification_head:
__A = roberta.model.classification_heads["""mnli"""].dense.weight
__A = roberta.model.classification_heads["""mnli"""].dense.bias
__A = roberta.model.classification_heads["""mnli"""].out_proj.weight
__A = roberta.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
__A = roberta.model.encoder.lm_head.dense.weight
__A = roberta.model.encoder.lm_head.dense.bias
__A = roberta.model.encoder.lm_head.layer_norm.weight
__A = roberta.model.encoder.lm_head.layer_norm.bias
__A = roberta.model.encoder.lm_head.weight
__A = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
__A = roberta.encode(__lowercase ).unsqueeze(0 ) # batch of size 1
__A = model(__lowercase )[0]
if classification_head:
__A = roberta.model.classification_heads["""mnli"""](roberta.extract_features(__lowercase ) )
else:
__A = roberta.model(__lowercase )[0]
print(our_output.shape , their_output.shape )
__A = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7
__A = torch.allclose(__lowercase , __lowercase , atol=1E-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
pathlib.Path(__lowercase ).mkdir(parents=__lowercase , exist_ok=__lowercase )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(__lowercase )
if __name__ == "__main__":
__a : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
__a : Dict = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 637
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class UpperCamelCase__ ( A_):
'''simple docstring'''
__a : Tuple = '''openai/whisper-base'''
__a : Union[str, Any] = (
'''This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the '''
'''transcribed text.'''
)
__a : Any = '''transcriber'''
__a : str = WhisperProcessor
__a : Tuple = WhisperForConditionalGeneration
__a : Union[str, Any] = ['''audio''']
__a : Any = ['''text''']
def A__ ( self , A ) ->Dict:
return self.pre_processor(A , return_tensors='pt' ).input_features
def A__ ( self , A ) ->Union[str, Any]:
return self.model.generate(inputs=A )
def A__ ( self , A ) ->int:
return self.pre_processor.batch_decode(A , skip_special_tokens=A )[0]
| 708
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class UpperCamelCase__ ( UpperCAmelCase__):
'''simple docstring'''
__a : Optional[int] = ["""image_processor""", """tokenizer"""]
__a : int = """OwlViTImageProcessor"""
__a : Optional[int] = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self , A=None , A=None , **A ) ->str:
UpperCAmelCase__ :str = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , A , )
UpperCAmelCase__ :List[Any] = kwargs.pop('feature_extractor' )
UpperCAmelCase__ :Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(A , A )
def __call__( self , A=None , A=None , A=None , A="max_length" , A="np" , **A ) ->Tuple:
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(A , A ) or (isinstance(A , A ) and not isinstance(text[0] , A )):
UpperCAmelCase__ :Optional[Any] = [self.tokenizer(A , padding=A , return_tensors=A , **A )]
elif isinstance(A , A ) and isinstance(text[0] , A ):
UpperCAmelCase__ :Dict = []
# Maximum number of queries across batch
UpperCAmelCase__ :Dict = max([len(A ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(A ) != max_num_queries:
UpperCAmelCase__ :List[str] = t + [' '] * (max_num_queries - len(A ))
UpperCAmelCase__ :List[str] = self.tokenizer(A , padding=A , return_tensors=A , **A )
encodings.append(A )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
UpperCAmelCase__ :Any = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
UpperCAmelCase__ :int = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
UpperCAmelCase__ :int = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
UpperCAmelCase__ :Optional[Any] = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
UpperCAmelCase__ :List[str] = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
UpperCAmelCase__ :int = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
UpperCAmelCase__ :List[str] = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
UpperCAmelCase__ :List[str] = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
UpperCAmelCase__ :Optional[int] = BatchEncoding()
UpperCAmelCase__ :Any = input_ids
UpperCAmelCase__ :str = attention_mask
if query_images is not None:
UpperCAmelCase__ :Optional[int] = BatchEncoding()
UpperCAmelCase__ :Tuple = self.image_processor(
A , return_tensors=A , **A ).pixel_values
UpperCAmelCase__ :str = query_pixel_values
if images is not None:
UpperCAmelCase__ :Optional[int] = self.image_processor(A , return_tensors=A , **A )
if text is not None and images is not None:
UpperCAmelCase__ :Optional[int] = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
UpperCAmelCase__ :int = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**A ) , tensor_type=A )
def A__ ( self , *A , **A ) ->Tuple:
return self.image_processor.post_process(*A , **A )
def A__ ( self , *A , **A ) ->Tuple:
return self.image_processor.post_process_object_detection(*A , **A )
def A__ ( self , *A , **A ) ->Any:
return self.image_processor.post_process_image_guided_detection(*A , **A )
def A__ ( self , *A , **A ) ->Optional[int]:
return self.tokenizer.batch_decode(*A , **A )
def A__ ( self , *A , **A ) ->Dict:
return self.tokenizer.decode(*A , **A )
@property
def A__ ( self ) ->Dict:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , A , )
return self.image_processor_class
@property
def A__ ( self ) ->Dict:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , A , )
return self.image_processor
| 433
| 0
|
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """
    Args:
        model: BertModel Pytorch model instance to be converted
        ckpt_dir: Tensorflow model directory
        model_name: model name
    """

    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
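
# A hypothetical command-line invocation (the script name and paths below are
# placeholders, not taken from the original file):
#
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path /path/to/pytorch_model.bin \
#       --tf_cache_dir /path/to/tf_checkpoints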
"""simple docstring"""
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1_000_000) -> int:
    # Sum the numbers below `limit` that are palindromic in base 10 and base 2.
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
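
# A quick sanity check (an assumed example, not in the original): 585 reads the
# same forwards and backwards in base 10 and in base 2 (1001001001), so it is
# counted by solution().
assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])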
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1_000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
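

# A small standalone sketch (assumed, not part of the test file): build a tiny
# randomly initialized LiLT model and run one forward pass, mirroring what
# LiltModelTester.create_and_check_model exercises above. The config values are
# the tester's defaults; the output hidden size then matches hidden_size=24.
if __name__ == "__main__":
    import torch
    from transformers import LiltConfig, LiltModel

    config = LiltConfig(
        vocab_size=99, hidden_size=24, num_hidden_layers=2, num_attention_heads=6, intermediate_size=37
    )
    model = LiltModel(config).eval()
    input_ids = torch.tensor([[1, 2]])
    bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])
    with torch.no_grad():
        outputs = model(input_ids=input_ids, bbox=bbox)
    print(outputs.last_hidden_state.shape)  # torch.Size([1, 2, 24])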
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_DESCRIPTION = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
_KWARGS_DESCRIPTION = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n    sources: list of source sentences where each sentence should be a string.\n    predictions: list of predicted sentences where each sentence should be a string.\n    references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n    sari: sari score\n    sacrebleu: sacrebleu score\n    exact: exact score\n\nExamples:\n    >>> sources=[\"About 95 species are currently accepted .\"]\n    >>> predictions=[\"About 95 you now get in .\"]\n    >>> references=[[\"About 95 species are currently known .\"]]\n    >>> wiki_split = datasets.load_metric(\"wiki_split\")\n    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n    >>> print(results)\n    {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def normalize_answer(text):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(text))))


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
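
# For reference (an assumed example, not in the original metric file):
# normalize_answer("The Cat!") == "cat" (lowercased, punctuation and the
# article "the" stripped, whitespace collapsed).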
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramcounter = Counter([rgram for rgrams in rgramslist for rgram in rgrams])
    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref
    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7

    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score


def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    # sacrebleu expects one list per reference position, not one list per prediction
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
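

# A direct-call sketch (assumed, not part of the original metric file): the
# helper functions above can also be used without datasets.load_metric. The
# sample sentences are the ones from the module docstring.
if __name__ == "__main__":
    sources = ["About 95 species are currently accepted ."]
    predictions = ["About 95 you now get in ."]
    references = [["About 95 species are currently known ."]]
    print(compute_sari(sources=sources, predictions=predictions, references=references))
    print(compute_em(predictions=predictions, references=references))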
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str() -> dict:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column
    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)
    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
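

# A usage sketch (assumed, not part of the original file): this formatter is
# what `Dataset.with_format("jax")` selects under the hood, so rows come back
# as jax.Array values instead of Python lists.
if __name__ == "__main__":
    from datasets import Dataset

    ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("jax")
    print(type(ds[0]["x"]))  # a jax.Array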
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t: int) -> str:
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
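
# For reference (assumed examples, not in the original file):
# format_time(3661) -> "1:01:01", format_time(61) -> "01:01".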
def html_progress_bar(value, total, prefix, label, width=300) -> str:
    "Html code for a progress bar `value`/`total` with `label` on the right, `prefix` on the left."
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """
def text_to_html_table(items) -> str:
    "Put the texts in `items` in an HTML table."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"    <th>{i}</th>\n"
    html_code += "  </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class NotebookProgressBar:
    warmup = 5
    update_every = 0.2

    def __init__(
        self,
        total: int,
        prefix: Optional[str] = None,
        leave: bool = True,
        parent: Optional["NotebookTrainingTracker"] = None,
        width: int = 300,
    ):
        self.total = total
        self.prefix = "" if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
    def update(self, value: int, force_update: bool = False, comment: str = None):
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)
    def update_bar(self, value, comment=None):
        spaced_value = " " * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            self.label = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            self.label = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()
    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))
class NotebookTrainingTracker(NotebookProgressBar):
    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
    def __init__(self):
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)
    def on_step_end(self, args, state, control, **kwargs):
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1,
            comment=f"Epoch {epoch}/{state.num_train_epochs}",
            force_update=self._force_next_update,
        )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None
    def on_log(self, args, state, control, logs=None, **kwargs):
        # Only for when there is no evaluation
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {"Training Loss": logs["loss"]}
            # First column is necessarily Step since we're not in epoch eval strategy
            values["Step"] = state.global_step
            self.training_tracker.write_line(values)
    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        if self.training_tracker is not None:
            values = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values["Training Loss"] = log["loss"]
                    break

            if self.first_column == "Epoch":
                values["Epoch"] = int(state.epoch)
            else:
                values["Step"] = state.global_step
            metric_key_prefix = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    metric_key_prefix = re.sub(r"\_loss$", "", k)
            _ = metrics.pop("total_flos", None)
            _ = metrics.pop("epoch", None)
            _ = metrics.pop(f"{metric_key_prefix}_runtime", None)
            _ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
            _ = metrics.pop(f"{metric_key_prefix}_jit_compilation_time", None)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    values["Validation Loss"] = v
                else:
                    splits = k.split("_")
                    name = " ".join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True
    def on_train_end(self, args, state, control, **kwargs):
        self.training_tracker.update(
            state.global_step,
            comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}",
            force_update=True,
        )
        self.training_tracker = None
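

# A wiring sketch (assumed, not part of the original file): in a notebook,
# transformers enables this callback automatically; passing it explicitly to a
# Trainer would look roughly like this. `model`, `args`, and `train_dataset`
# are placeholders that must be defined elsewhere.
#
#   from transformers import Trainer
#   trainer = Trainer(model=model, args=args, train_dataset=train_dataset,
#                     callbacks=[NotebookProgressCallback()])
#   trainer.train()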
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """
    XOR the ciphertext with the repeating key; return the decoded string, or
    None if any decoded character is not printable.
    """
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Try every three-lowercase-letter key and keep the fully printable decodings."""
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """Keep only the candidate decodings that contain `common_word`."""
    return [possible for possible in possibles if common_word in possible.lower()]
def UpperCamelCase ( a = "p059_cipher.txt" ) -> int:
'''simple docstring'''
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = 42
__magic_name__ = Path(a ).parent.joinpath(a ).read_text(encoding='''utf-8''' )
__magic_name__ = [int(a ) for number in data.strip().split(''',''' )]
__magic_name__ = filter_valid_chars(a )
for common_word in COMMON_WORDS:
__magic_name__ = filter_common_word(a , a )
if len(a ) == 1:
break
__magic_name__ = possibles[0]
return sum(ord(a ) for char in decoded_text )
if __name__ == "__main__":
print(F'''{solution() = }''')
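
# A toy round-trip check (assumed, not from the original solution): encrypt a
# short message with the key "abc" and confirm try_key recovers it.
if __name__ == "__main__":
    message = "the quick brown fox"
    key = tuple(ord(c) for c in "abc")
    cipher = [ord(c) ^ k for c, k in zip(message, cycle(key))]
    assert try_key(cipher, key) == message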