| column | type | values |
|---|---|---|
| code | string | lengths 86–54.5k |
| code_codestyle | int64 | 0–371 |
| style_context | string | lengths 87–49.2k |
| style_context_codestyle | int64 | 0–349 |
| label | int64 | 0–1 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path

import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized

from transformers import AutoProcessor
from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available

from ..wav2vec2.test_feature_extraction_wav2vec2 import floats_list


if is_pyctcdecode_available():
    from huggingface_hub import snapshot_download
    from pyctcdecode import BeamSearchDecoderCTC

    from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
    from transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm import Wav2Vec2DecoderWithLMOutput

if is_torch_available():
    from transformers import Wav2Vec2ForCTC


@require_pyctcdecode
class Wav2Vec2ProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return Wav2Vec2CTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return Wav2Vec2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        processor.save_pretrained(self.tmpdirname)
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)

    def test_save_load_pretrained_additional_features(self):
        processor = Wav2Vec2ProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)

        # make sure decoder kwargs passed to `from_pretrained` override the saved values
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)

    def test_load_decoder_tokenizer_mismatch_content(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            Wav2Vec2ProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)

    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)

        decoded_processor = processor.decode(logits)
        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)

    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)

    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0

        decoded_processor_out = processor.batch_decode(
            logits,
            beam_width=beam_width,
            beam_prune_logp=beam_prune_logp,
            token_min_logp=token_min_logp,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
                beam_width=beam_width,
                beam_prune_logp=beam_prune_logp,
                token_min_logp=token_min_logp,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor)

        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))

        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))

    def test_decoder_with_params_of_lm(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True

        decoded_processor_out = processor.batch_decode(
            logits,
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)
        decoder.reset_params(
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor)

        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, True)

    def test_decoder_download_ignores_files(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ["alphabet.json", "language_model"]

        downloaded_decoder_files.sort()
        expected_decoder_files.sort()

        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)

    def test_decoder_local_files(self):
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm")
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(local_dir)

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_cached_dir)

        local_decoder_files.sort()
        expected_decoder_files.sort()

        # test that both the decoder from the hub and the local files in the cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)

    def test_processor_from_auto_processor(self):
        processor_wav2vec2 = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")

        raw_speech = floats_list((3, 1000))

        input_wav2vec2 = processor_wav2vec2(raw_speech, return_tensors="np")
        input_auto = processor_auto(raw_speech, return_tensors="np")

        for key in input_wav2vec2.keys():
            self.assertAlmostEqual(input_wav2vec2[key].sum(), input_auto[key].sum(), delta=1e-2)

        logits = self._get_dummy_logits()

        decoded_wav2vec2 = processor_wav2vec2.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)

        self.assertListEqual(decoded_wav2vec2.text, decoded_auto.text)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )

    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets_integration_fast(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]

        outputs = processor.decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])

    def test_offsets_integration_fast_batch(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()

        outputs = processor.batch_decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])

    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16000))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
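As a quick orientation to what these tests exercise, here is a minimal inference sketch for `Wav2Vec2ProcessorWithLM`; the checkpoint is the one used in the slow integration test above, and the zero-filled waveform is only a stand-in for real 16 kHz audio.

```python
import numpy as np
import torch

from transformers import AutoProcessor, Wav2Vec2ForCTC

processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

# stand-in for real audio: one second of silence at 16 kHz
speech = np.zeros(16_000, dtype=np.float32)

inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits.cpu().numpy()

# `decode` runs pyctcdecode's beam search over the CTC logits of a single sample
print(processor.decode(logits[0]).text)
```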
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402


SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))

            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            _ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, loading a config that requires it raises.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")

            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")

            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")

        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
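The last two tests rely on `AutoConfig.register`; the following is a compact, hedged sketch of that pattern (the class and model-type names here are illustrative, not part of the test suite):

```python
from transformers import AutoConfig, PretrainedConfig


class MyConfig(PretrainedConfig):
    model_type = "my-model"


# after registration, the auto-API can resolve the custom model type
AutoConfig.register("my-model", MyConfig)
config = AutoConfig.for_model("my-model")
assert isinstance(config, MyConfig)
```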
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
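Because the shim simply subclasses `SegformerImageProcessor`, migrating is a rename; a minimal sketch follows (the checkpoint name is illustrative):

```python
from transformers import SegformerImageProcessor

# drop-in replacement for the deprecated SegformerFeatureExtractor
image_processor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
# inputs = image_processor(images=pil_image, return_tensors="pt")
```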
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import CLIPSegProcessor, ViTImageProcessor


@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor_text(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
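Mirroring what `test_processor_text` checks, here is a minimal inference sketch for `CLIPSegProcessor`; the checkpoint name is illustrative and the blank image is a stand-in for real input.

```python
from PIL import Image

from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.new("RGB", (352, 352))  # stand-in for a real image

inputs = processor(text=["a cat"], images=[image], padding=True, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']
```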
"""simple docstring"""
from manim import *
class __A ( SCREAMING_SNAKE_CASE_ ):
def __A ( self ):
_lowerCAmelCase : str = Rectangle(height=0.5 , width=0.5 )
_lowerCAmelCase : List[Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
_lowerCAmelCase : Tuple = Rectangle(height=0.2_5 , width=0.2_5 )
_lowerCAmelCase : Optional[int] = [mem.copy() for i in range(6 )]
_lowerCAmelCase : Optional[Any] = [mem.copy() for i in range(6 )]
_lowerCAmelCase : List[str] = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : Dict = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : str = VGroup(a__ , a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : str = Text("""CPU""" , font_size=24 )
_lowerCAmelCase : str = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(a__ )
_lowerCAmelCase : Union[str, Any] = [mem.copy() for i in range(4 )]
_lowerCAmelCase : str = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : str = Text("""GPU""" , font_size=24 )
_lowerCAmelCase : str = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
gpu.move_to([-1, -1, 0] )
self.add(a__ )
_lowerCAmelCase : Optional[Any] = [mem.copy() for i in range(6 )]
_lowerCAmelCase : Optional[Any] = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : int = Text("""Model""" , font_size=24 )
_lowerCAmelCase : int = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
model.move_to([3, -1.0, 0] )
self.add(a__ )
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : str = []
for i, rect in enumerate(a__ ):
_lowerCAmelCase : int = fill.copy().set_fill(a__ , opacity=0.8 )
target.move_to(a__ )
model_arr.append(a__ )
_lowerCAmelCase : Dict = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(a__ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(a__ )
self.add(*a__ , *a__ )
_lowerCAmelCase : str = [meta_mem.copy() for i in range(6 )]
_lowerCAmelCase : Dict = [meta_mem.copy() for i in range(6 )]
_lowerCAmelCase : Optional[int] = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : Optional[Any] = VGroup(*a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : int = VGroup(a__ , a__ ).arrange(a__ , buff=0 )
_lowerCAmelCase : Dict = Text("""Disk""" , font_size=24 )
_lowerCAmelCase : Optional[int] = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ )
disk.move_to([-4, -1.2_5, 0] )
self.add(a__ , a__ )
_lowerCAmelCase : str = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowerCAmelCase : Dict = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(a__ , a__ )
_lowerCAmelCase : List[str] = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(a__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(a__ )
_lowerCAmelCase : List[str] = MarkupText(
F"Now watch as an input is passed through the model\nand how the memory is utilized and handled." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(a__ ) )
_lowerCAmelCase : Union[str, Any] = Square(0.3 )
input.set_fill(a__ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , a__ , buff=0.5 )
self.play(Write(a__ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=a__ , buff=0.0_2 )
self.play(MoveToTarget(a__ ) )
self.play(FadeOut(a__ ) )
_lowerCAmelCase : Dict = Arrow(start=a__ , end=a__ , color=a__ , buff=0.5 )
a.next_to(model_arr[0].get_left() , a__ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
_lowerCAmelCase : Optional[Any] = MarkupText(
F"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(a__ , run_time=3 ) )
_lowerCAmelCase : Optional[int] = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.0_2}
self.play(
Write(a__ ) , Circumscribe(model_arr[0] , color=a__ , **a__ ) , Circumscribe(model_cpu_arr[0] , color=a__ , **a__ ) , Circumscribe(gpu_rect[0] , color=a__ , **a__ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
_lowerCAmelCase : Any = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.0_2 , a__ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.0_2 )
_lowerCAmelCase : Union[str, Any] = AnimationGroup(
FadeOut(a__ , run_time=0.5 ) , MoveToTarget(a__ , run_time=0.5 ) , FadeIn(a__ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(a__ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
_lowerCAmelCase : int = 0.7
self.play(
Circumscribe(model_arr[i] , **a__ ) , Circumscribe(cpu_left_col_base[i] , **a__ ) , Circumscribe(cpu_left_col_base[i + 1] , color=a__ , **a__ ) , Circumscribe(gpu_rect[0] , color=a__ , **a__ ) , Circumscribe(model_arr[i + 1] , color=a__ , **a__ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.0_2 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=a__ , **a__ ) , Circumscribe(cpu_left_col_base[-1] , color=a__ , **a__ ) , Circumscribe(gpu_rect[0] , color=a__ , **a__ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
_lowerCAmelCase : Any = a_c
_lowerCAmelCase : Any = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.0_2 , buff=0.5 )
self.play(
FadeOut(a__ ) , FadeOut(a__ , run_time=0.5 ) , )
_lowerCAmelCase : List[str] = MarkupText(F"Inference on a model too large for GPU memory\nis successfully completed." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(a__ , run_time=3 ) , MoveToTarget(a__ ) )
self.wait()
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform):
        """Compute the log-mel spectrogram of a single waveform."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
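A minimal usage sketch, assuming the class above is available as `transformers.WhisperFeatureExtractor`: the extractor pads or truncates to `n_samples` (30 s at 16 kHz) and returns one 80-bin log-mel spectrogram with `nb_max_frames` frames per sample.

```python
import numpy as np

from transformers import WhisperFeatureExtractor

feature_extractor = WhisperFeatureExtractor()
waveform = np.zeros(16_000 * 30, dtype=np.float32)  # stand-in for 30 s of audio

features = feature_extractor(waveform, sampling_rate=16_000, return_tensors="np")
print(features["input_features"].shape)  # (1, 80, 3000)
```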
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
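A brief usage sketch: like any `PretrainedConfig`, the config can be instantiated with overrides and round-tripped through a plain dict.

```python
config = NezhaConfig(num_hidden_layers=6)
assert config.model_type == "nezha"
assert config.num_hidden_layers == 6

# serialization round trip supported by the PretrainedConfig base class
restored = NezhaConfig.from_dict(config.to_dict())
assert restored.num_hidden_layers == 6
```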
import os
import unittest

from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
from __future__ import annotations

from collections import namedtuple


def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Solve for the one of voltage, current, power that is passed as 0."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
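Example calls: exactly one argument is passed as 0 and is solved for from the other two via P = V * I.

```python
print(electric_power(voltage=0, current=2, power=5))  # result(name='voltage', value=2.5)
print(electric_power(voltage=2, current=2, power=0))  # result(name='power', value=4.0)
```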
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Root-mean-square speed from the kinetic theory of gases: v = sqrt(3RT/M)."""
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    # relax all outgoing edges of v in this search direction
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        # if the other search already settled nxt, try to improve the meeting cost
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        # stop once the two frontiers can no longer improve the best meeting cost
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
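Example call against the sample graphs above: the best E -> F route is E -> G -> F with cost 2 + 1 = 3, beating E -> B -> C -> D -> F at cost 4.

```python
print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3
```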
'''simple docstring'''
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception('''You need to set maximum flow algorithm before.''')
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.get_maximum_flow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # subclasses override this with the actual algorithm
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def get_maximum_flow(self):
        if not self.executed:
            raise Exception('''You should execute algorithm before using its result!''')

        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]

        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
UpperCamelCase = [0]
UpperCamelCase = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
UpperCamelCase = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
UpperCamelCase = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
UpperCamelCase = flow_network.find_maximum_flow()
print(f'maximum flow is {maximum_flow}')
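# A compact Edmonds-Karp (BFS augmenting paths) reference for cross-checking
# the push-relabel executor above on the same capacity matrix -- a hedged
# sketch; `edmonds_karp` is a hypothetical helper, not part of the original
# module.
from collections import deque


def edmonds_karp(capacity, source, sink):
    n = len(capacity)
    flow = [[0] * n for _ in range(n)]
    max_flow = 0
    while True:
        # BFS for the shortest augmenting path in the residual graph
        parent = [-1] * n
        parent[source] = source
        bfs_queue = deque([source])
        while bfs_queue and parent[sink] == -1:
            u = bfs_queue.popleft()
            for v in range(n):
                if parent[v] == -1 and capacity[u][v] - flow[u][v] > 0:
                    parent[v] = u
                    bfs_queue.append(v)
        if parent[sink] == -1:  # no augmenting path left
            return max_flow
        # find the bottleneck along the path, then augment flow along it
        bottleneck = float("inf")
        v = sink
        while v != source:
            u = parent[v]
            bottleneck = min(bottleneck, capacity[u][v] - flow[u][v])
            v = u
        v = sink
        while v != source:
            u = parent[v]
            flow[u][v] += bottleneck
            flow[v][u] -= bottleneck
            v = u
        max_flow += bottleneck


# The demo network above should agree with this reference:
# edmonds_karp([[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]], 0, 3) == 6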
| 334
| 0
|
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
__lowerCAmelCase : int = 'bert-base-cased'
__lowerCAmelCase : List[str] = 'fp16'
__lowerCAmelCase : Any = 'bf16'
__lowerCAmelCase : Optional[Any] = [FPaa, BFaa]
@require_fsdp
@require_cuda
class UpperCAmelCase_ ( _a ):
'''simple docstring'''
def _lowercase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
__magic_name__ = dict(
ACCELERATE_USE_FSDP="""true""" , MASTER_ADDR="""localhost""" , MASTER_PORT="""10999""" , RANK="""0""" , LOCAL_RANK="""0""" , WORLD_SIZE="""1""" , )
def _lowercase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(snake_case_ ):
__magic_name__ = self.dist_env.copy()
__magic_name__ = F'''{i + 1}'''
__magic_name__ = strategy
with mockenv_context(**snake_case_ ):
__magic_name__ = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def _lowercase ( self : List[str] ) -> str:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(snake_case_ ):
__magic_name__ = self.dist_env.copy()
__magic_name__ = prefetch_policy
with mockenv_context(**snake_case_ ):
__magic_name__ = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def _lowercase ( self : Dict ) -> Dict:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(snake_case_ ):
__magic_name__ = self.dist_env.copy()
__magic_name__ = state_dict_type
with mockenv_context(**snake_case_ ):
__magic_name__ = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only )
def _lowercase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__magic_name__ = AutoModel.from_pretrained(snake_case_ )
for policy in FSDP_AUTO_WRAP_POLICY:
__magic_name__ = self.dist_env.copy()
__magic_name__ = policy
if policy == "TRANSFORMER_BASED_WRAP":
__magic_name__ = """BertLayer"""
elif policy == "SIZE_BASED_WRAP":
__magic_name__ = """2000"""
with mockenv_context(**snake_case_ ):
__magic_name__ = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(snake_case_ )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
__magic_name__ = self.dist_env.copy()
__magic_name__ = """TRANSFORMER_BASED_WRAP"""
__magic_name__ = """T5Layer"""
with mockenv_context(**snake_case_ ):
__magic_name__ = FullyShardedDataParallelPlugin()
with self.assertRaises(snake_case_ ) as cm:
fsdp_plugin.set_auto_wrap_policy(snake_case_ )
self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) )
__magic_name__ = self.dist_env.copy()
__magic_name__ = """SIZE_BASED_WRAP"""
__magic_name__ = """0"""
with mockenv_context(**snake_case_ ):
__magic_name__ = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(snake_case_ )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def _lowercase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
__magic_name__ = self.dist_env.copy()
__magic_name__ = mp_dtype
with mockenv_context(**snake_case_ ):
__magic_name__ = Accelerator()
if mp_dtype == "fp16":
__magic_name__ = torch.floataa
elif mp_dtype == "bf16":
__magic_name__ = torch.bfloataa
__magic_name__ = MixedPrecision(param_dtype=snake_case_ , reduce_dtype=snake_case_ , buffer_dtype=snake_case_ )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , snake_case_ )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , snake_case_ ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(snake_case_ )
def _lowercase ( self : Tuple ) -> int:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
__magic_name__ = self.dist_env.copy()
__magic_name__ = str(snake_case_ ).lower()
with mockenv_context(**snake_case_ ):
__magic_name__ = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=snake_case_ ) )
@require_fsdp
@require_multi_gpu
@slow
class UpperCAmelCase_ ( _a ):
'''simple docstring'''
def _lowercase ( self : List[str] ) -> int:
"""simple docstring"""
super().setUp()
__magic_name__ = 0.82
__magic_name__ = [
"""fsdp_shard_grad_op_transformer_based_wrap""",
"""fsdp_full_shard_transformer_based_wrap""",
]
__magic_name__ = {
"""multi_gpu_fp16""": 3200,
"""fsdp_shard_grad_op_transformer_based_wrap_fp16""": 2000,
"""fsdp_full_shard_transformer_based_wrap_fp16""": 1900,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
__magic_name__ = 160
__magic_name__ = 160
__magic_name__ = inspect.getfile(accelerate.test_utils )
__magic_name__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] )
def _lowercase ( self : Any ) -> Tuple:
"""simple docstring"""
__magic_name__ = os.path.join(self.test_scripts_folder , """test_performance.py""" )
__magic_name__ = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""]
for config in self.performance_configs:
__magic_name__ = cmd.copy()
for i, strategy in enumerate(snake_case_ ):
if strategy.lower() in config:
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
break
if "fp32" in config:
cmd_config.append("""--mixed_precision=no""" )
else:
cmd_config.append("""--mixed_precision=fp16""" )
if "cpu_offload" in config:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
F'''--performance_lower_bound={self.performance_lower_bound}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
def _lowercase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__magic_name__ = os.path.join(self.test_scripts_folder , """test_checkpointing.py""" )
__magic_name__ = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
"""--use_fsdp""",
"""--mixed_precision=fp16""",
"""--fsdp_transformer_layer_cls_to_wrap=BertLayer""",
]
for i, strategy in enumerate(snake_case_ ):
__magic_name__ = cmd.copy()
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
if strategy != "FULL_SHARD":
continue
__magic_name__ = len(snake_case_ )
for state_dict_type in FSDP_STATE_DICT_TYPE:
__magic_name__ = cmd_config[:state_dict_config_index]
cmd_config.append(F'''--fsdp_state_dict_type={state_dict_type}''' )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
"""--partial_train_epoch=1""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
__magic_name__ = cmd_config[:-1]
__magic_name__ = os.path.join(self.tmpdir , """epoch_0""" )
cmd_config.extend(
[
F'''--resume_from_checkpoint={resume_from_checkpoint}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
def _lowercase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ = os.path.join(self.test_scripts_folder , """test_peak_memory_usage.py""" )
__magic_name__ = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
__magic_name__ = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["""--mixed_precision=fp16"""] )
else:
cmd_config.extend(["""--mixed_precision=no"""] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["""--use_fsdp"""] )
for i, strategy in enumerate(snake_case_ ):
if strategy.lower() in spec:
cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''' )
break
if "cpu_offload" in spec:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
F'''--output_dir={self.tmpdir}''',
F'''--peak_memory_upper_bound={peak_mem_upper_bound}''',
F'''--n_train={self.n_train}''',
F'''--n_val={self.n_val}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
| 88
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "glpn"

    def __init__(
        self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu",
        hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
        drop_path_rate=0.1, layer_norm_eps=1e-6, decoder_hidden_size=64, max_depth=10,
        head_in_index=-1, **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
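if __name__ == "__main__":
    # Hedged usage sketch: instantiating with defaults; the values above are
    # assumed to mirror the vinvino02/glpn-kitti hyper-parameters.
    config = GLPNConfig()
    print(config.model_type, config.hidden_sizes)  # glpn [32, 64, 160, 256]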
| 35
| 0
|
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave the characters of two strings; the longer tail is appended."""
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
| 198
|
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
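# Minimal usage sketch (hedged): FileLock serializes access across processes
# via an on-disk lock file, and Timeout is raised when `acquire` cannot get
# the lock within the given timeout.
if __name__ == "__main__":
    import tempfile

    demo_lock = FileLock(os.path.join(tempfile.gettempdir(), "demo.lock"))
    with demo_lock.acquire(timeout=1):
        print("lock held, doing exclusive work")
    # the lock is released when the context manager exits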
| 198
| 1
|
"""simple docstring"""
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of exactly `successes` successes in `trials`
    independent Bernoulli trials with success probability `prob`."""
    if successes > trials:
        raise ValueError('''successes must be lower or equal to trials''')
    if trials < 0 or successes < 0:
        raise ValueError('''the function is defined for non-negative integers''')
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError('''the function is defined for non-negative integers''')
    if not 0 < prob < 1:
        raise ValueError('''prob has to be strictly between 0 and 1''')
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / (k! * (n - k)!)
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("""Probability of 2 successes out of 4 trails""")
print("""with probability of 0.75 is:""", end=""" """)
print(binomial_distribution(2, 4, 0.7_5))
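    # Worked check (hedged): C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625
    # = 0.2109375, which is what the call above prints.
    assert abs(binomial_distribution(2, 4, 0.75) - 0.2109375) < 1e-12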
| 91
|
"""simple docstring"""
import pprint
import requests
API_ENDPOINT_URL = 'https://zenquotes.io/api'


def quote_of_the_day():
    """Fetch the quote of the day from the zenquotes API."""
    return requests.get(API_ENDPOINT_URL + '''/today''').json()


def random_quotes():
    """Fetch a random quote from the zenquotes API."""
    return requests.get(API_ENDPOINT_URL + '''/random''').json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
| 167
| 0
|
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    """Download the BLIP demo image and preprocess it into a batched tensor."""
    img_url = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"""
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("""RGB""")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48_145_466, 0.4_578_275, 0.40_821_073), (0.26_862_954, 0.26_130_258, 0.27_577_711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    """Map original BLIP state-dict keys to the transformers naming scheme."""
    if "visual_encoder" in key:
        key = re.sub("""visual_encoder*""", """vision_model.encoder""", key)
    if "blocks" in key:
        key = re.sub(R"""blocks""", """layers""", key)
    if "attn" in key:
        key = re.sub(R"""attn""", """self_attn""", key)
    if "norm1" in key:
        key = re.sub(R"""norm1""", """layer_norm1""", key)
    if "norm2" in key:
        key = re.sub(R"""norm2""", """layer_norm2""", key)
    if "encoder.norm" in key:
        key = re.sub(R"""encoder.norm""", """post_layernorm""", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(R"""encoder.patch_embed.proj""", """embeddings.patch_embedding""", key)
    if "encoder.pos_embed" in key:
        key = re.sub(R"""encoder.pos_embed""", """embeddings.position_embedding""", key)
    if "encoder.cls_token" in key:
        key = re.sub(R"""encoder.cls_token""", """embeddings.class_embedding""", key)
    if "self_attn" in key:
        key = re.sub(R"""self_attn.proj""", """self_attn.projection""", key)
    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    """Copy/paste/tweak the original BLIP weights into the transformers design."""
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"""

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="""base""")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="""cpu""")
    tokenizer = BertTokenizer.from_pretrained("""bert-base-uncased""")
    input_ids = tokenizer(["""a picture of"""]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"""
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="""base""")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["""How many dogs are in this image?"""]
    question_input_ids = tokenizer(question, return_tensors="""pt""").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"

    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + """_vqa""")

    model_url = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"""

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="""base""")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["""A picture of a woman with a dog sitting in a beach"""]
    question_input_ids = tokenizer(
        question, return_tensors="""pt""", padding="""max_length""", truncation=True, max_length=35, ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2_110_687_494_277_954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45_698_845_386_505_127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + """_itm""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 366
|
import requests
SCREAMING_SNAKE_CASE_:List[str] = """""" # <-- Put your OpenWeatherMap appid here!
SCREAMING_SNAKE_CASE_:Dict = """https://api.openweathermap.org/data/2.5/"""
def __UpperCamelCase ( _lowerCAmelCase = "Chicago" , _lowerCAmelCase = APPID ) -> dict:
"""simple docstring"""
return requests.get(URL_BASE + """weather""" , params=locals() ).json()
def __UpperCamelCase ( _lowerCAmelCase = "Kolkata, India" , _lowerCAmelCase = APPID ) -> dict:
"""simple docstring"""
return requests.get(URL_BASE + """forecast""" , params=locals() ).json()
def __UpperCamelCase ( _lowerCAmelCase = 55.68 , _lowerCAmelCase = 12.57 , _lowerCAmelCase = APPID ) -> dict:
"""simple docstring"""
return requests.get(URL_BASE + """onecall""" , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input("""Enter a location:""").strip()
if location:
pprint(current_weather(location))
else:
break
| 115
| 0
|
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool('''text-to-speech''')
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool('''hey''')
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool('''hey''')
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
| 125
|
'''simple docstring'''
from math import factorial
def combinations(n: int, k: int) -> int:
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError('''Please enter positive integers for n and k where n >= k''')
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
"The number of five-card hands possible from a standard",
f'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
"If a class of 40 students must be arranged into groups of",
f'''4 for group projects, there are {combinations(40, 4)} ways''',
"to arrange them.\n",
)
print(
"If 10 teams are competing in a Formula One race, there",
f'''are {combinations(10, 3)} ways that first, second and''',
"third place can be awarded.",
)
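    # Hedged cross-check against the standard library: math.comb computes the
    # same binomial coefficient directly.
    from math import comb

    assert combinations(52, 5) == comb(52, 5) == 2598960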
| 125
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase : Optional[int] = logging.get_logger(__name__)
__lowercase : Any = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute",
        quant_mode=False, force_dequant="none", **kwargs,
    ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
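if __name__ == "__main__":
    # Hedged sketch: for the default export task, both declared ONNX inputs
    # share dynamic batch and sequence axes.
    onnx_config = IBertOnnxConfig(IBertConfig())
    print(onnx_config.inputs)
    # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
    #              ('attention_mask', {0: 'batch', 1: 'sequence'})])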
| 294
|
'''simple docstring'''
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup

headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = "dhaka" , _SCREAMING_SNAKE_CASE : int = 5 ):
__a : Optional[Any] = min(_SCREAMING_SNAKE_CASE , 50 ) # Prevent abuse!
__a : Optional[Any] = {
'q': query,
'tbm': 'isch',
'hl': 'en',
'ijn': '0',
}
__a : Tuple = requests.get('https://www.google.com/search' , params=_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE )
__a : Dict = BeautifulSoup(html.text , 'html.parser' )
__a : List[str] = ''.join(
re.findall(r'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) )
__a : Optional[Any] = json.dumps(_SCREAMING_SNAKE_CASE )
__a : List[str] = json.loads(_SCREAMING_SNAKE_CASE )
__a : List[Any] = re.findall(
r'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , _SCREAMING_SNAKE_CASE , )
if not matched_google_image_data:
return 0
__a : Tuple = re.sub(
r'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(_SCREAMING_SNAKE_CASE ) , )
__a : Optional[Any] = re.findall(
r'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , _SCREAMING_SNAKE_CASE , )
for index, fixed_full_res_image in enumerate(_SCREAMING_SNAKE_CASE ):
if index >= max_images:
return index
__a : List[str] = bytes(_SCREAMING_SNAKE_CASE , 'ascii' ).decode(
'unicode-escape' )
__a : Tuple = bytes(_SCREAMING_SNAKE_CASE , 'ascii' ).decode(
'unicode-escape' )
__a : Dict = urllib.request.build_opener()
__a : Union[str, Any] = [
(
'User-Agent',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
)
]
urllib.request.install_opener(_SCREAMING_SNAKE_CASE )
__a : List[Any] = F"""query_{query.replace(" " , "_" )}"""
if not os.path.exists(_SCREAMING_SNAKE_CASE ):
os.makedirs(_SCREAMING_SNAKE_CASE )
urllib.request.urlretrieve( # noqa: S310
_SCREAMING_SNAKE_CASE , F"""{path_name}/original_size_img_{index}.jpg""" )
return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(f'''{image_count} images were downloaded to disk.''')
except IndexError:
print('Please provide a search term.')
raise
| 294
| 1
|
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
_SCREAMING_SNAKE_CASE : List[str] = TypeVar("T")
class _snake_case ( Generic[T] ):
def __init__( self , a__ = True ) -> None:
'''simple docstring'''
snake_case_ = {} # dictionary of lists
snake_case_ = directed
def lowerCAmelCase__ ( self , a__ , a__ ) -> GraphAdjacencyList[T]:
'''simple docstring'''
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(a__ )
self.adj_list[destination_vertex].append(a__ )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(a__ )
snake_case_ = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(a__ )
snake_case_ = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
snake_case_ = [destination_vertex]
snake_case_ = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(a__ )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(a__ )
snake_case_ = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
snake_case_ = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
snake_case_ = [destination_vertex]
snake_case_ = []
return self
def __repr__( self ) -> str:
'''simple docstring'''
return pformat(self.adj_list )
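if __name__ == "__main__":
    # Hedged usage sketch: chaining works because add_edge returns self.
    directed = GraphAdjacencyList[int]()
    directed.add_edge(1, 2).add_edge(2, 3)
    print(directed)  # {1: [2], 2: [3], 3: []}

    undirected = GraphAdjacencyList[str](directed=False)
    undirected.add_edge("a", "b")
    print(undirected)  # {'a': ['b'], 'b': ['a']}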
| 85
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class NllbMoeConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'nllb-moe'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self, vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096,
        encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True,
        activation_function="relu", d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0,
        init_std=0.02, decoder_start_token_id=2, scale_embedding=True, router_bias=False, router_dtype="float32",
        router_ignore_padding_tokens=False, num_experts=128, expert_capacity=64, encoder_sparse_step=4,
        decoder_sparse_step=4, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, second_expert_policy="all",
        normalize_router_prob_before_dropping=False, batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2, pad_token_id=1, bos_token_id=0,
        eos_token_id=2, output_router_logits=False, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}' )
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs, )
| 334
| 0
|
"""simple docstring"""
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Run a depth-first search from `vert`, returning vertices in post-order."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect every vertex reachable from `vert` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: DFS post-order on the graph, then DFS on the
    reversed graph in reverse post-order."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
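if __name__ == "__main__":
    # Hedged sanity checks on the two test graphs above: graph 1 contains the
    # cycle 0 -> 2 -> 1 -> 0 plus two singletons, and graph 2 splits into two
    # 3-vertex components.
    assert sorted(map(sorted, strongly_connected_components(test_graph_1))) == [[0, 1, 2], [3], [4]]
    assert sorted(map(sorted, strongly_connected_components(test_graph_2))) == [[0, 1, 2], [3, 4, 5]]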
| 351
|
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = """new-model"""


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class UpperCamelCase ( unittest.TestCase ):
@slow
def _lowercase (self : List[str]) -> Dict:
__snake_case : Any = 'bert-base-cased'
__snake_case : Optional[Any] = AutoConfig.from_pretrained(_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
__snake_case : Union[str, Any] = TFAutoModel.from_pretrained(_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
@slow
def _lowercase (self : List[Any]) -> str:
__snake_case : Optional[int] = 'bert-base-cased'
__snake_case : List[Any] = AutoConfig.from_pretrained(_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
__snake_case : Dict = TFAutoModelForPreTraining.from_pretrained(_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
@slow
def _lowercase (self : Any) -> List[str]:
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : str = AutoConfig.from_pretrained(_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
__snake_case : List[str] = TFAutoModelForCausalLM.from_pretrained(_A)
__snake_case , __snake_case : List[str] = TFAutoModelForCausalLM.from_pretrained(_A , output_loading_info=_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
@slow
def _lowercase (self : Tuple) -> Dict:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Union[str, Any] = AutoConfig.from_pretrained(_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
__snake_case : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
@slow
def _lowercase (self : Union[str, Any]) -> Optional[int]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Union[str, Any] = AutoConfig.from_pretrained(_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
__snake_case : Optional[Any] = TFAutoModelForMaskedLM.from_pretrained(_A)
__snake_case , __snake_case : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(_A , output_loading_info=_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
@slow
def _lowercase (self : str) -> Union[str, Any]:
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Dict = AutoConfig.from_pretrained(_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
__snake_case : str = TFAutoModelForSeqaSeqLM.from_pretrained(_A)
__snake_case , __snake_case : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(_A , output_loading_info=_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
@slow
def _lowercase (self : str) -> str:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__snake_case : Tuple = AutoConfig.from_pretrained(_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
__snake_case : Tuple = TFAutoModelForSequenceClassification.from_pretrained(_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
@slow
def _lowercase (self : Optional[Any]) -> Optional[int]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__snake_case : List[str] = AutoConfig.from_pretrained(_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
__snake_case : Any = TFAutoModelForQuestionAnswering.from_pretrained(_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
@slow
@require_tensorflow_probability
def _lowercase (self : List[Any]) -> List[str]:
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
__snake_case : Optional[Any] = AutoConfig.from_pretrained(_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
__snake_case : int = TFAutoModelForTableQuestionAnswering.from_pretrained(_A)
__snake_case , __snake_case : Dict = TFAutoModelForTableQuestionAnswering.from_pretrained(
_A , output_loading_info=_A)
self.assertIsNotNone(_A)
self.assertIsInstance(_A , _A)
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_44_10)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_44_10)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_44_10)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_44_10)
def _lowercase (self : Optional[Any]) -> str:
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
__snake_case : Optional[Any] = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny')
self.assertIsInstance(_A , _A)
__snake_case : int = copy.deepcopy(model.config)
__snake_case : int = ['FunnelBaseModel']
__snake_case : int = TFAutoModel.from_config(_A)
self.assertIsInstance(_A , _A)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_A)
__snake_case : List[Any] = TFAutoModel.from_pretrained(_A)
self.assertIsInstance(_A , _A)
def _lowercase (self : List[Any]) -> int:
try:
AutoConfig.register('new-model' , _A)
__snake_case : int = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__):
# Wrong config class will raise an error
with self.assertRaises(_A):
auto_class.register(_A , _A)
auto_class.register(_A , _A)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A):
auto_class.register(_A , _A)
# Now that the config is registered, it can be used as any other config with the auto-API
__snake_case : Union[str, Any] = BertModelTester(self).get_config()
__snake_case : Optional[int] = NewModelConfig(**tiny_config.to_dict())
__snake_case : List[str] = auto_class.from_config(_A)
self.assertIsInstance(_A , _A)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_A)
__snake_case : Tuple = auto_class.from_pretrained(_A)
self.assertIsInstance(_A , _A)
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def _lowercase (self : Optional[int]) -> Union[str, Any]:
with self.assertRaisesRegex(
_A , 'bert-base is not a local folder and is not a valid model identifier'):
__snake_case : Any = TFAutoModel.from_pretrained('bert-base')
def _lowercase (self : str) -> str:
with self.assertRaisesRegex(
_A , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
__snake_case : Optional[Any] = TFAutoModel.from_pretrained(_A , revision='aaaaaa')
def _lowercase (self : int) -> Any:
with self.assertRaisesRegex(
_A , 'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' , ):
__snake_case : List[str] = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model')
def _lowercase (self : Optional[Any]) -> Any:
with self.assertRaisesRegex(_A , 'Use `from_pt=True` to load this model'):
__snake_case : List[str] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only')
def _lowercase (self : str) -> Any:
# Make sure we have cached the model.
__snake_case : str = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert')
with RequestCounter() as counter:
__snake_case : List[str] = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert')
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
# With a sharded checkpoint
__snake_case : Optional[int] = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded')
with RequestCounter() as counter:
__snake_case : Any = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded')
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
| 95
| 0
|
'''simple docstring'''
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = 13 , __lowerCAmelCase = 64 , __lowerCAmelCase = 2 , __lowerCAmelCase = 3 , __lowerCAmelCase = 3 , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = 128 , __lowerCAmelCase=[16, 32, 64, 128] , __lowerCAmelCase = 7 , __lowerCAmelCase = 4 , __lowerCAmelCase = 37 , __lowerCAmelCase = "gelu" , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 10 , __lowerCAmelCase = 0.0_2 , __lowerCAmelCase = 2 , __lowerCAmelCase = 1 , __lowerCAmelCase = 128 , __lowerCAmelCase = [2, 2, 2, 2] , __lowerCAmelCase = 2 , __lowerCAmelCase = 2 , ) -> str:
lowercase__ : Optional[Any] = parent
lowercase__ : Any = batch_size
lowercase__ : List[str] = image_size
lowercase__ : Dict = patch_size
lowercase__ : str = num_channels
lowercase__ : List[Any] = is_training
lowercase__ : int = use_labels
lowercase__ : Any = hidden_size
lowercase__ : int = num_hidden_layers
lowercase__ : Any = num_attention_heads
lowercase__ : List[Any] = intermediate_size
lowercase__ : Optional[Any] = hidden_act
lowercase__ : Dict = hidden_dropout_prob
lowercase__ : int = attention_probs_dropout_prob
lowercase__ : str = type_sequence_label_size
lowercase__ : Union[str, Any] = initializer_range
lowercase__ : List[Any] = encoder_stride
lowercase__ : Dict = num_attention_outputs
lowercase__ : Dict = embed_dim
lowercase__ : int = embed_dim + 1
lowercase__ : Dict = resolution
lowercase__ : Tuple = depths
lowercase__ : str = hidden_sizes
lowercase__ : Dict = dim
lowercase__ : Dict = mlp_expansion_ratio
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return EfficientFormerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            resolution=self.resolution,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            dim=self.dim,
            mlp_expansion_ratio=self.mlp_expansion_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="EfficientFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="EfficientFormer does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)

                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]),
                    [decoder_seq_length, self.model_tester.hidden_size],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet")
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)

        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )
    def test_compile_tf_model(self):
        # We use a simplified version of this test for EfficientFormer because it requires training=False
        # and Keras refuses to let us force that during functional construction
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)

            self.assertTrue(outputs_dict is not None)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 198
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 198
| 1
|
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 213
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
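# `_import_structure` maps each submodule to the public symbols it provides; the
# `_LazyModule` at the bottom of the file uses it to defer the torch-dependent
# imports until an attribute is first accessed.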
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 213
| 1
|
'''simple docstring'''
import unittest

from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin


if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}
def get_html_strings():
    html_string_1 = "<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>"

    html_string_2 = "\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n "

    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
| 79
|
"""simple docstring"""
import math
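# Segmented sieve of Eratosthenes: first sieve the primes up to sqrt(n), then use
# them to mark composites in windows of roughly sqrt(n) numbers at a time, so only
# O(sqrt(n)) booleans are held in memory instead of n.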
def sieve(n: int) -> list[int]:
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
| 115
| 0
|
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
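# Maps parameter-name fragments in the original CLAP checkpoint to their Hugging Face
# counterparts; rename_state_dict() below applies these as substring replacements on
# every state-dict key.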
KEYS_TO_MODIFY_MAPPING = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)

            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))

            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2

            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")
if "audio" and "qkv" in key:
# split qkv into query key and value
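            # The checkpoint stores q/k/v as a single tensor fused along dim 0, so the
            # three equal slices below recover the separate projections.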
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 353
|
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
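    # Note: DDIM's deterministic sampler works well with ~50 subsampled steps, while
    # DDPM's stochastic sampler is run with its full 1000 training steps.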
@torch.no_grad()
def __call__( self : Dict , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : str = None , UpperCAmelCase_ : np.ndarray = None , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : int = None , UpperCAmelCase_ : torch.Generator = None , UpperCAmelCase_ : float = 0 , UpperCAmelCase_ : float = 0 , UpperCAmelCase_ : torch.Generator = None , UpperCAmelCase_ : float = 0 , UpperCAmelCase_ : torch.Tensor = None , UpperCAmelCase_ : torch.Tensor = None , UpperCAmelCase_ : Optional[Any]=True , ):
"""simple docstring"""
a : Optional[Any] = steps or self.get_default_steps()
self.scheduler.set_timesteps(UpperCAmelCase_)
a : Optional[Any] = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(
                    input_images, noise, self.scheduler.timesteps[start_step - 1]
                )

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))
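        # `mask` holds pre-noised copies of the input for every remaining timestep; during
        # denoising, the first `mask_start` and last `mask_end` spectrogram columns are
        # overwritten from it so those seconds of the original audio are preserved.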
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    eta=eta,
                    generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        # An inverse process is only defined for the deterministic DDIM scheduler
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample
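    # slerp: spherical linear interpolation between two latent tensors, treating them as
    # vectors on a hypersphere; theta is the angle between the flattened tensors, and the
    # result traces the great-circle arc from x0 (alpha=0) to x1 (alpha=1).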
    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
| 345
| 0
|
"""simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"
_logger = None


def logger():
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    # Raised when the lock could not be acquired within *timeout* seconds.
    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self._timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")

        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None
    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        #   https://github.com/benediktschmitt/py-filelock/issues/31
        #   https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
| 294
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 294
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 367
|
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
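        # With the defaults above: (16 - 2) // 2 + 1 = 8 frequency positions,
        # (24 - 2) // 2 + 1 = 12 time positions, 8 * 12 = 96 patches, and a
        # final sequence length of 98.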
    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False
    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )

    audio, sampling_rate = torchaudio.load(filepath)

    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 53
| 0
|
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/m2m100_418M""": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
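# Each language code is exposed to the model as a dedicated `__{lang}__` token appended
# after the SentencePiece vocabulary (see the lang_token_to_id mapping built in __init__).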
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="m2m100" , __UpperCAmelCase = None , __UpperCAmelCase=8 , **__UpperCAmelCase , ) -> None:
_a = {} if sp_model_kwargs is None else sp_model_kwargs
_a = language_codes
_a = FAIRSEQ_LANGUAGE_CODES[language_codes]
_a = {lang_code: F'__{lang_code}__' for lang_code in fairseq_language_code}
_a = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(lowerCAmelCase__ )
for lang_code in fairseq_language_code
if self.get_lang_token(lowerCAmelCase__ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowerCAmelCase__ , tgt_lang=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , language_codes=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=lowerCAmelCase__ , **lowerCAmelCase__ , )
_a = vocab_file
_a = load_json(lowerCAmelCase__ )
_a = {v: k for k, v in self.encoder.items()}
_a = spm_file
_a = load_spm(lowerCAmelCase__ , self.sp_model_kwargs )
_a = len(self.encoder )
_a = {
self.get_lang_token(lowerCAmelCase__ ): self.encoder_size + i for i, lang_code in enumerate(lowerCAmelCase__ )
}
_a = {lang_code: self.encoder_size + i for i, lang_code in enumerate(lowerCAmelCase__ )}
_a = {v: k for k, v in self.lang_token_to_id.items()}
_a = src_lang if src_lang is not None else "en"
_a = tgt_lang
_a = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
_a = num_madeup_words
@property
def _UpperCAmelCase ( self ) -> int:
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def _UpperCAmelCase ( self ) -> str:
return self._src_lang
@src_lang.setter
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> None:
_a = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Tuple:
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(lowerCAmelCase__ , self.encoder[self.unk_token] )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> str:
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(lowerCAmelCase__ , self.unk_token )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Any:
_a = []
_a = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCAmelCase__ ) + token
_a = []
else:
current_sub_tokens.append(lowerCAmelCase__ )
out_string += self.sp_model.decode(lowerCAmelCase__ )
return out_string.strip()
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
_a = [1] * len(self.prefix_tokens )
_a = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase__ )) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase__ )) + ([0] * len(lowerCAmelCase__ )) + suffix_ones
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _UpperCAmelCase ( self ) -> Dict:
_a = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
_a = self.__dict__.copy()
_a = None
return state
def __setstate__( self , __UpperCAmelCase ) -> None:
_a = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_a = {}
_a = load_spm(self.spm_file , self.sp_model_kwargs )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
_a = Path(lowerCAmelCase__ )
if not save_dir.is_dir():
raise OSError(F'{save_directory} should be a directory' )
_a = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
_a = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , lowerCAmelCase__ )
if os.path.abspath(self.spm_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , lowerCAmelCase__ )
elif not os.path.isfile(self.spm_file ):
with open(lowerCAmelCase__ , '''wb''' ) as fi:
_a = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__ )
return (str(lowerCAmelCase__ ), str(lowerCAmelCase__ ))
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = "en" , __UpperCAmelCase = None , __UpperCAmelCase = "ro" , **__UpperCAmelCase , ) -> BatchEncoding:
_a = src_lang
_a = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> str:
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
_a = src_lang
_a = self(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , **lowerCAmelCase__ )
_a = self.get_lang_id(lowerCAmelCase__ )
_a = tgt_lang_id
return inputs
    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting: [lang_code] ... [eos]."""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target lang setting: [lang_code] ... [eos]."""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    """Load a SentencePiece model from `path`."""
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Any:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
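# --- Illustrative usage (added): this class exposes the language-aware API of
# M2M100-style multilingual tokenizers in `transformers`. The checkpoint name
# below is an assumption for the sketch, not something this module defines.
#
#     from transformers import M2M100Tokenizer
#
#     tokenizer = M2M100Tokenizer.from_pretrained(
#         "facebook/m2m100_418M", src_lang="en", tgt_lang="ro"
#     )
#     model_inputs = tokenizer("Hello world", return_tensors="pt")
#     # `generate` is then called with forced_bos_token_id=tokenizer.get_lang_id("ro")
#     # so decoding starts with the Romanian language token.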
| 320
|
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Difference between the hypothesis and the actual output for one example."""
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )


def _hypothesis_value(data_input_tuple):
    """Evaluate the linear hypothesis h(x) = theta_0 + theta_1*x_1 + ... for one input."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    """Actual output for the given example in the chosen data set."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """Hypothesis value for the given example in the chosen data set."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sum the errors over the training data, weighted by the feature at `index`.

    `index == -1` selects the bias term (constant feature 1).
    """
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    """Mean gradient of the squared-error cost w.r.t. the parameter at `index + 1`."""
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
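# Added illustrative check (not part of the original algorithm): the bias-term
# gradient that run_gradient_descent() requests via get_cost_derivative(-1),
# written out directly; the two should agree up to floating point.
def _manual_bias_derivative():
    return sum(_error(i) for i in range(m)) / m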
| 95
| 0
|
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """Return True if the string is a dotted-quad IPv4 address."""
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    # Each octet of a valid IPv4 address ranges from 0 to 255 inclusive.
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
| 123
|
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs) -> CLIPTokenizer:
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs) -> CLIPTokenizerFast:
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
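# Illustrative usage (added): the behaviour exercised above, driven directly.
# The checkpoint name is the public CLIP checkpoint and is an assumption here,
# not something this test file loads:
#
#     from transformers import CLIPTokenizer
#
#     tok = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
#     tok.tokenize("lower newer")  # BPE pieces carry </w> end-of-word markers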
| 123
| 1
|
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    """Cosine similarity between two batches of embeddings (one row per left embedding)."""
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor (NHWC, as expected by the Flax CLIP vision tower)
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(self, clip_input, params: dict = None):
        # incoming pixel values are NCHW; the module expects NHWC
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
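if __name__ == "__main__":
    # Illustrative check (added, not part of the original module): the cosine
    # helper above is a normalized dot product, so identical unit-direction
    # vectors score ~1.0 and the output has one row per left embedding.
    demo = jax_cosine_distance(jnp.ones((2, 4)), jnp.ones((3, 4)))
    print(demo.shape)  # (2, 3); every entry ~1.0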
| 213
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class PoolFormerImageProcessor(BaseImageProcessor):
    # Class name reconstructed from the crop_pct/center-crop pipeline below;
    # the original name was mangled in the source.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")

        if crop_pct is not None:
            # scale up the requested size so that the subsequent center crop of
            # `size` keeps `crop_pct` of the resized image
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean=None,
        image_std=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
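if __name__ == "__main__":
    # Illustrative usage (added, not part of the original module): run the
    # processor over a synthetic image and inspect the output shape.
    dummy_image = (np.random.rand(256, 320, 3) * 255).astype(np.uint8)
    processor = PoolFormerImageProcessor()
    batch = processor(images=dummy_image, return_tensors="np")
    print(batch["pixel_values"].shape)  # e.g. (1, 3, 224, 224)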
| 213
| 1
|
from manim import *


class Stage5(Scene):
    def construct(self):
        # NOTE: layout direction constants (UP/RIGHT/DOWN) were lost in the
        # source and are reconstructed here to keep the scene self-consistent.
        mem = Rectangle(height=0.5, width=0.5)
        meta_mem = Rectangle(height=0.25, width=0.25)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []
        model_meta_arr = []  # kept from the source scene; unused here

        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr, *model_meta_arr)

        ckpt_base = [mem.copy() for i in range(6)]
        ckpt_rect = VGroup(*ckpt_base).arrange(RIGHT, buff=0)
        ckpt_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(ckpt_rect, ckpt_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        checkpoint.move_to([3, 0.5, 0])
        self.add(checkpoint)

        ckpt_arr = []
        ckpt_cpu_arr = []

        for i, rect in enumerate(ckpt_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            ckpt_arr.append(target)

            cpu_target = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(cpu_target)
        self.add(*ckpt_arr, *ckpt_cpu_arr)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_5 = MarkupText(
            f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.",
            font_size=24,
        )
        step_5.move_to([2, 2, 0])

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4.0, -1.25, 0])
        self.play(Write(step_5, run_time=3), Write(disk_text, run_time=1), Create(disk_rects, run_time=1))

        animations = []
        for i, rect in enumerate(ckpt_cpu_arr):
            target = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(target, run_time=1.5))
        self.play(*animations)

        self.play(FadeOut(step_5))
        step_6 = MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection.", font_size=24)
        step_6.move_to([2, 2, 0])

        self.play(Write(step_6, run_time=3))

        self.play(
            FadeOut(key, key_text, *ckpt_arr, *ckpt_cpu_arr),
        )

        self.wait()
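# Added note: a scene like this is typically rendered with manim's CLI, e.g.
# `manim -pql this_file.py Stage5` for a quick preview (`-p` opens the result,
# `-ql` selects low render quality).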
| 218
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
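if __name__ == "__main__":
    # Illustrative usage (added, not part of the original module): a smaller
    # adaptive-softmax configuration; the numbers are made up for the sketch.
    config = TransfoXLConfig(vocab_size=10000, cutoffs=[1000, 5000], d_model=256, n_layer=4, n_head=4)
    print(config.tie_projs)  # [False, True, True] because proj_share_all_but_first defaults to True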
| 218
| 1
|
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but that also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'
_CITATION = '\n@article{2020SciPy-NMeth,\nauthor  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n           Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n           Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n           Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n           Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n           Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n           Kern, Robert and Larson, Eric and Carey, C J and\n           Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n           {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n           Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n           Harris, Charles R. and Archibald, Anne M. and\n           Ribeiro, Antonio H. and Pedregosa, Fabian and\n           {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n           Computing in Python}},\njournal = {Nature Methods},\nyear    = {2020},\nvolume  = {17},\npages   = {261--272},\nadsurl  = {https://rdcu.be/b08Wh},\ndoi     = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 3
|
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for the ForPreTraining model: it needs both label tensors
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)


def _long_tensor(tok_lst):
    """Build a long tensor of token ids on the current test device."""
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
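# Illustrative usage (added, not part of the test suite): the checkpoint the
# slow test loads, driven through a fill-mask pipeline:
#
#     from transformers import pipeline
#
#     unmasker = pipeline("fill-mask", model="google/mobilebert-uncased")
#     unmasker("The capital of France is [MASK].")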
| 345
| 0
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
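# Added note: replacing `sys.modules[__name__]` with a `_LazyModule` defers the
# heavy torch import until a modeling attribute is first accessed, e.g.
#
#     from transformers.models.graphormer import GraphormerConfig  # cheap
#     config = GraphormerConfig()  # still no torch import via this module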
| 355
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: str = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
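if __name__ == "__main__":
    # Illustrative usage (added, not part of the original module): a small
    # config for monthly data; the numbers are made up for the sketch.
    config = TimeSeriesTransformerConfig(prediction_length=12, context_length=24, num_time_features=2)
    print(config.feature_size)  # input_size * len(lags_sequence) + extra features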
| 187
| 0
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}
class lowerCAmelCase__ ( __lowerCamelCase ):
lowerCAmelCase : Optional[int] = "dpt"
def __init__( self : int , lowerCamelCase__ : Optional[int]=7_68 , lowerCamelCase__ : Any=12 , lowerCamelCase__ : Tuple=12 , lowerCamelCase__ : Optional[Any]=30_72 , lowerCamelCase__ : List[Any]="gelu" , lowerCamelCase__ : Tuple=0.0 , lowerCamelCase__ : List[Any]=0.0 , lowerCamelCase__ : List[Any]=0.0_2 , lowerCamelCase__ : Dict=1E-12 , lowerCamelCase__ : List[Any]=3_84 , lowerCamelCase__ : List[str]=16 , lowerCamelCase__ : Union[str, Any]=3 , lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : str=True , lowerCamelCase__ : Union[str, Any]=[2, 5, 8, 11] , lowerCamelCase__ : Any="project" , lowerCamelCase__ : List[str]=[4, 2, 1, 0.5] , lowerCamelCase__ : Union[str, Any]=[96, 1_92, 3_84, 7_68] , lowerCamelCase__ : List[str]=2_56 , lowerCamelCase__ : str=-1 , lowerCamelCase__ : Dict=False , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : Union[str, Any]=0.4 , lowerCamelCase__ : int=2_55 , lowerCamelCase__ : Tuple=0.1 , lowerCamelCase__ : Optional[Any]=[1, 10_24, 24, 24] , lowerCamelCase__ : List[str]=[0, 1] , lowerCamelCase__ : Optional[Any]=None , **lowerCamelCase__ : Tuple , ) ->Optional[int]:
'''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid
        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    F"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""")
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages
            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout
    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
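# Hedged usage sketch (illustrative, not in the original file):
#
#   config = DPTConfig(is_hybrid=True)   # builds a default BiT backbone config
#   d = config.to_dict()
#   assert d["model_type"] == "dpt" and d["backbone_config"] is not None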
| 234
|
'''simple docstring'''
import random
def _partition(data: list, pivot) -> tuple:
    """simple docstring"""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def quick_select(items: list, index: int):
    """simple docstring"""
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
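# Illustrative usage (not part of the original file): quick_select returns the
# index-th smallest element in expected O(n) time, so the median of an
# odd-length list is quick_select(items, len(items) // 2).
if __name__ == "__main__":
    items = [7, 1, 5, 3, 9]
    assert quick_select(items, 0) == 1  # minimum
    assert quick_select(items, 2) == 5  # median
    assert quick_select(items, 4) == 9  # maximum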
| 53
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    batch_params = [
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
    required_optional_params = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self) -> int:
        """simple docstring"""
        return 32

    @property
    def time_input_dim(self) -> int:
        """simple docstring"""
        return 32

    @property
    def block_out_channels_0(self) -> int:
        """simple docstring"""
        return self.time_input_dim

    @property
    def time_embed_dim(self) -> int:
        """simple docstring"""
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self) -> int:
        """simple docstring"""
        return 100
    @property
    def dummy_unet(self):
        """simple docstring"""
torch.manual_seed(0 )
        model_kwargs = {
'''in_channels''': 9,
            # Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_0, self.block_out_channels_0 * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
        model = UNet2DConditionModel(**model_kwargs)
return model
    @property
    def dummy_movq_kwargs(self) -> dict:
        """simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        """simple docstring"""
torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs)
return model
    def get_dummy_components(self):
        """simple docstring"""
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule='linear',
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type='epsilon',
            thresholding=False,
        )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('RGB').resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'image': init_image,
            'mask_image': mask,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 2,
            'guidance_scale': 4.0,
            'output_type': 'np',
        }
        return inputs
    def test_kandinsky_inpaint(self):
        """simple docstring"""
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(F'image.shape {image.shape}')
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
    def test_inference_batch_single_identical(self):
        """simple docstring"""
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        """simple docstring"""
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy')
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png')
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = 'a hat'
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-decoder-inpaint', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='cpu').manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='', ).to_tuple()
        output = pipeline(
            image=init_image, mask_image=mask, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, output_type='np', )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
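# Hedged end-to-end sketch (illustrative): the Kandinsky 2.2 inpaint flow pairs a
# prior pipeline (text -> image embeddings) with the decoder inpaint pipeline
# (embeddings + image + mask -> image), exactly as exercised in the slow test above.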
| 358
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_yolos'] = ['YolosFeatureExtractor']
    _import_structure['image_processing_yolos'] = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_yolos'] = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
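# Note (illustrative): with the `_LazyModule` pattern above, `import transformers.models.yolos`
# stays cheap; heavy submodules such as `modeling_yolos` are only imported the first time an
# attribute like `YolosForObjectDetection` is actually accessed.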
| 179
| 0
|
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    """simple docstring"""

    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class PipelineIterator(IterableDataset):
    """simple docstring"""

    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size
        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self
    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result
    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class PipelineChunkIterator(PipelineIterator):
    """simple docstring"""

    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # Subiterator None means we haven't started a `preprocess` iterator yet, so start it
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class PipelinePackIterator(PipelineIterator):
    """simple docstring"""

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.

        # This iterator accumulates items (possibly while unbatching) until it
        # hits `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
class KeyDataset(Dataset):
    """simple docstring"""

    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    """simple docstring"""

    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
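# Hedged usage sketch (illustrative, not part of the original module): KeyDataset
# lets a pipeline stream one column of a datasets.Dataset.
#
#   from transformers import pipeline
#   from datasets import load_dataset  # assumed available
#   pipe = pipeline("text-classification")
#   ds = load_dataset("imdb", split="test")
#   for out in pipe(KeyDataset(ds, "text"), batch_size=8):
#       print(out)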
| 123
|
def dodecahedron_surface_area(edge: float) -> float:
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
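# Worked example (illustrative): for edge length 5,
#   surface area = 3 * sqrt(25 + 10 * sqrt(5)) * 5**2 ≈ 516.14
#   volume       = (15 + 7 * sqrt(5)) / 4 * 5**3    ≈ 957.89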
if __name__ == "__main__":
import doctest
doctest.testmod()
| 123
| 1
|
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(fnc: Callable[[int | float], int | float], x_start: int | float, x_end: int | float, steps: int = 100, ) -> float:
    '''simple docstring'''
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximate each small segment of the curve as linear and solve
        # for the trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
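# Worked equation (illustrative): each loop iteration adds a trapezium of area
#   |f(x1) + f(x2)| * (x2 - x1) / 2,
# so for f(x) = x**2 on [0, 1] the sum approaches the exact integral 1/3 as
# `steps` grows.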
if __name__ == "__main__":
    def f(x):
        '''simple docstring'''
        return x**3 + x**2

    print('f(x) = x^3 + x^2')
    print('The area between the curve, x = -5, x = 5 and the x axis is:')
    i = 10
    while i <= 100000:
        print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}')
        i *= 10
| 356
|
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {'UserAgent': UserAgent().random}
def extract_user_profile(script) -> dict:
    '''simple docstring'''
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    def __init__(self, username):
        self.url = f'https://www.instagram.com/{username}/'
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, 'html.parser').find_all('script')
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self):
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self):
        return f'{self.fullname} ({self.username}) is {self.biography}'
    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = 'github') -> None:
    '''simple docstring'''
    import os

    if os.environ.get('CI'):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser('github')
print(instagram_user)
print(F'{instagram_user.number_of_posts = }')
print(F'{instagram_user.number_of_followers = }')
print(F'{instagram_user.number_of_followings = }')
print(F'{instagram_user.email = }')
print(F'{instagram_user.website = }')
print(F'{instagram_user.profile_picture_url = }')
print(F'{instagram_user.is_verified = }')
print(F'{instagram_user.is_private = }')
| 304
| 0
|
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
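# Example invocation (illustrative; the script filename is hypothetical):
#   python convert_unclip_txt2img_to_image_variation.py \
#       --dump_path ./karlo-image-variations \
#       --txt2img_unclip kakaobrain/karlo-v1-alpha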
| 218
|
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
    def setUp(self):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token='<unk>', bos_token='<unk>', pad_token='<unk>')
tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = 'This is a test'
        output_text = 'This is a test'
        return input_text, output_text
    def test_convert_token_and_id(self):
        '''simple docstring'''
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<unk>')
        self.assertEqual(vocab_keys[1], '<s>')
        self.assertEqual(vocab_keys[-1], 'j')
        self.assertEqual(len(vocab_keys), 2000)
    def test_vocab_size(self):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)
    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        # fmt: off
        self.assertListEqual(
            tokens, ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'], )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'])
        # fmt: on
    def test_fast_encode_decode(self):
        '''simple docstring'''
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ['This is a test', 'I was born in 92000, and this is falsé.']
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)
        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
    @slow
    def test_tokenizer_integration(self):
        '''simple docstring'''
        sequences = [
            '<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
            'Hey there, how are you doing this fine day?',
            'This is a text with a trailing spaces followed by a dot .',
            'Häj sväjs lillebrör! =)',
            'Det är inget fel på Mr. Cool',
        ]
        # fmt: off
        expected_encoding = {'input_ids': [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='AI-Sweden/gpt-sw3-126m', sequences=sequences, )
| 218
| 1
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)
WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=51865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4, decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536, encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50257, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False, max_source_positions=1500, max_target_positions=448, pad_token_id=50256, bos_token_id=50256, eos_token_id=50256, suppress_tokens=None, begin_suppress_tokens=[220, 50256], use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs, ):
"""simple docstring"""
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs, )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        common_inputs = OrderedDict(
            [
                ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
            ] )
        if self.use_past:
            common_inputs['decoder_input_ids'] = {0: 'batch'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
        return common_inputs
    def generate_dummy_inputs(self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, sampling_rate: int = 22050, time_duration: float = 5.0, frequency: int = 220, ) -> Mapping[str, Any]:
        """simple docstring"""
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework, sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency, )
        encoder_sequence_length = encoder_inputs['input_features'].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework )
        dummy_inputs['input_features'] = encoder_inputs.pop('input_features')
        dummy_inputs['decoder_input_ids'] = decoder_inputs.pop('decoder_input_ids')
        if 'past_key_values' in decoder_inputs:
            dummy_inputs['past_key_values'] = decoder_inputs.pop('past_key_values')
        return dummy_inputs
    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1e-3
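# Hedged usage sketch (illustrative, not in the original file):
#
#   config = WhisperConfig()                  # ~51k vocab, 6+6 layers by default
#   onnx_config = WhisperOnnxConfig(config)
#   print(onnx_config.inputs)                 # dynamic axes for 'input_features'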
| 351
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class lowercase ( unittest.TestCase ):
    def setUp(self):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['', 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token='!', **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """simple docstring"""
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token='!', **kwargs)

    def get_image_processor(self, **kwargs):
        """simple docstring"""
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
"""simple docstring"""
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_rust_tokenizer()
UpperCamelCase = self.get_image_processor()
UpperCamelCase = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
processor_slow.save_pretrained(self.tmpdirname )
UpperCamelCase = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=A_ )
UpperCamelCase = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
processor_fast.save_pretrained(self.tmpdirname )
UpperCamelCase = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A_ )
self.assertIsInstance(processor_fast.tokenizer , A_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A_ )
self.assertIsInstance(processor_fast.image_processor , A_ )
    def test_save_load_pretrained_additional_features(self):
"""simple docstring"""
UpperCamelCase = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
UpperCamelCase = self.get_image_processor(do_normalize=A_ )
UpperCamelCase = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=A_ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A_ )
    def test_image_processor(self):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = image_processor(A_ , return_tensors='np' )
UpperCamelCase = processor(images=A_ , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer(self):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
UpperCamelCase = 'lower newer'
UpperCamelCase = processor(text=A_ , return_tensors='np' )
UpperCamelCase = tokenizer(A_ , return_tensors='np' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
    def test_processor(self):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
UpperCamelCase = 'lower newer'
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = processor(text=A_ , images=A_ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
    def test_processor_with_text_list(self):
"""simple docstring"""
UpperCamelCase = 'google/owlvit-base-patch32'
UpperCamelCase = OwlViTProcessor.from_pretrained(A_ )
UpperCamelCase = ['cat', 'nasa badge']
UpperCamelCase = processor(text=A_ )
UpperCamelCase = 16
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
    def test_processor_with_nested_text_list(self):
"""simple docstring"""
UpperCamelCase = 'google/owlvit-base-patch32'
UpperCamelCase = OwlViTProcessor.from_pretrained(A_ )
UpperCamelCase = [['cat', 'nasa badge'], ['person']]
UpperCamelCase = processor(text=A_ )
UpperCamelCase = 16
UpperCamelCase = len(A_ )
UpperCamelCase = max([len(A_ ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
    def test_processor_case(self):
"""simple docstring"""
UpperCamelCase = 'google/owlvit-base-patch32'
UpperCamelCase = OwlViTProcessor.from_pretrained(A_ )
UpperCamelCase = ['cat', 'nasa badge']
UpperCamelCase = processor(text=A_ )
UpperCamelCase = 16
UpperCamelCase = inputs['input_ids']
UpperCamelCase = [
[49_406, 2_368, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49_406, 6_841, 11_301, 49_407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
    def test_processor_case2(self):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = processor(images=A_ , query_images=A_ )
self.assertListEqual(list(inputs.keys() ) , ['query_pixel_values', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
    def test_tokenizer_decode(self):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase = processor.batch_decode(A_ )
UpperCamelCase = tokenizer.batch_decode(A_ )
self.assertListEqual(A_ , A_ )
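# Hedged usage sketch (illustrative, not part of the test file): zero-shot object
# detection with OwlViT.
#
#   from transformers import OwlViTProcessor, OwlViTForObjectDetection
#   processor = OwlViTProcessor.from_pretrained('google/owlvit-base-patch32')
#   model = OwlViTForObjectDetection.from_pretrained('google/owlvit-base-patch32')
#   inputs = processor(text=[['cat', 'nasa badge']], images=image, return_tensors='pt')
#   outputs = model(**inputs)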
| 110
| 0
|
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    """simple docstring"""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"""Unsupported alpha_transform_type: {alpha_transform_type}""")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
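# Worked equation (illustrative): with the cosine schedule,
#   alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2
# and each beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta,
# so the betas start near 0 and grow toward max_beta late in the schedule.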
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    """simple docstring"""

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, variance_type: str = "fixed_small_log", clip_sample: bool = True, clip_sample_range: Optional[float] = 1.0, prediction_type: str = "epsilon", beta_schedule: str = "squaredcos_cap_v2", ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")
        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())
        self.variance_type = variance_type
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, prev_timestep: Optional[int] = None, generator=None, return_dict: bool = True, ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep
        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
                ''' for the UnCLIPScheduler.''')
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range)
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device)
            variance = self._get_variance(
                t, predicted_variance=predicted_variance, prev_timestep=prev_timestep, )
            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
                    ''' for the UnCLIPScheduler.''')
            variance = variance * variance_noise
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor, ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)
        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
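# Worked equation (illustrative): add_noise implements the closed-form forward
# diffusion step
#   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise,
# broadcasting the per-timestep scalars over the sample dimensions.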
| 23
|
def solution():
    '''simple docstring'''
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = ''.join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
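# Note (illustrative): the indices above are 0-based, so constant[9] is the 10th
# digit of Champernowne's constant 0.123456789101112...; for Project Euler 40
# the printed product d_1 * d_10 * ... * d_1000000 should be 210.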
if __name__ == "__main__":
print(solution())
| 187
| 0
|
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
SAMPLE_TEXT = 'Hello world! cécé herlolip'
BertAbsConfig = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
"""simple docstring"""
    config = BertAbsConfig(
        temp_dir='.', finetune_bert=False, large=False, share_emb=True, use_bert_emb=False, encoder='bert', max_pos=512, enc_layers=6, enc_hidden_size=512, enc_heads=8, enc_ff_size=512, enc_dropout=0.2, dec_layers=6, dec_hidden_size=768, dec_heads=8, dec_ff_size=2048, dec_dropout=0.2, )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(checkpoints, torch.device('cpu'), config)
    original.eval()
    new_model = BertAbsSummarizer(config, torch.device('cpu'))
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("""convert the model""" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("""Make sure that the models' outputs are identical""" )
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None
    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)
    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask)[0]
    output_converted_generator = new_model.generator(output_converted_model)
    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between the models' outputs: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between the generators' outputs: {:.2f}".format(maximum_absolute_difference))
    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
if are_identical:
logging.info("""all weights are equal up to 1e-3""" )
else:
raise ValueError("""the weights are different. The new model is likely different from the original one.""" )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("""saving the model's state dictionary""" )
torch.save(
new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
    args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
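# Hedged usage sketch (editor's addition, not part of the conversion script):
# reloading the state dict saved above; `config` would be rebuilt exactly as in
# convert_bertabs_checkpoints.
def _reload_converted_model(config):
    state_dict = torch.load(
        "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin",
        map_location="cpu",
    )
    model = BertAbsSummarizer(config, torch.device("cpu"))
    model.load_state_dict(state_dict)
    return model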
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
processor = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def init_clap(checkpoint_path, enable_fusion=False):
    """simple docstring"""
    model, model_cfg = create_model(
        "HTSAT-tiny", "roberta", checkpoint_path, precision="fp32", device="cuda:0" if torch.cuda.is_available() else "cpu", enable_fusion=enable_fusion, fusion_type="aff_2d" if enable_fusion else None, )
    return model, model_cfg
def rename_state_dict(state_dict):
    """simple docstring"""
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer) // 3}.linear.")
        elif re.match(text_projection_pattern, key):
            projection_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`, projection index 0 maps to linear1 and the rest to linear2
            transformers_projection_layer = 1 if projection_layer == 0 else 2
            key = key.replace(f"_projection.{projection_layer}.", f"_projection.linear{transformers_projection_layer}.")
        if "audio" in key and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    """simple docstring"""
    clap_model, model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
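# Standalone sketch (editor's addition): how the sequential-layer renaming in
# rename_state_dict behaves on a toy key — three CLAP `sequential` indices
# collapse onto one transformers `layers` index.
def _demo_sequential_rename():
    import re
    key = "text_branch.sequential.3.weight"
    sequential_layer = re.match(r".*sequential.(\d+).*", key).group(1)
    renamed = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer) // 3}.linear.")
    assert renamed == "text_branch.layers.1.linear.weight"
    return renamed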
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(self, vqvae, unet, scheduler) -> None:
        """simple docstring"""
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
@torch.no_grad()
    def __call__(self, image=None, batch_size=1, num_inference_steps=100, eta=0.0, generator=None, output_type="pil", return_dict=True) -> Union[Tuple, ImagePipelineOutput]:
        """simple docstring"""
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")
        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)
        height, width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
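# Hedged usage sketch (editor's addition). The checkpoint name is an assumption
# based on the public LDM super-resolution release; substitute your own
# vqvae/unet/scheduler if it is unavailable.
def _demo_super_resolution(low_res_image):
    from diffusers import LDMSuperResolutionPipeline
    pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
    return pipe(image=low_res_image, num_inference_steps=100, eta=1.0).images[0]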
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    """simple docstring"""
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechTaProcessor
    model_class = SpeechTaForTextToSpeech
    post_processor_class = SpeechTaHifiGan
    inputs = ["text"]
    outputs = ["audio"]
    def setup(self):
        '''simple docstring'''
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()
    def encode(self, text, speaker_embeddings=None):
        '''simple docstring'''
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")
            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
    def forward(self, inputs):
        '''simple docstring'''
        with torch.no_grad():
            return self.model.generate_speech(**inputs)
    def decode(self, outputs):
        '''simple docstring'''
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
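# Editor's sketch (assumes the `datasets` package is installed): fetching the
# same CMU Arctic x-vector the tool falls back to, outside the tool itself.
def _demo_speaker_embedding():
    from datasets import load_dataset
    ds = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
    return torch.tensor(ds[7305]["xvector"]).unsqueeze(0)  # shape (1, 512)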
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_sew'] = [
'''SEW_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SEWForCTC''',
'''SEWForSequenceClassification''',
'''SEWModel''',
'''SEWPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
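# Standalone sketch (editor's addition): the same lazy-import idea expressed
# with PEP 562 module-level __getattr__ instead of transformers' _LazyModule.
def _lazy_getattr_sketch(name, package):
    import importlib
    structure = {"configuration_sew": ["SEWConfig"]}  # toy import map
    for module_name, symbols in structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", package)
            return getattr(module, name)
    raise AttributeError(name)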
'''simple docstring'''
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '''
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
'''
_KWARGS_DESCRIPTION = '''
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .
- `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{\'recall\': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{\'recall\': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{\'recall\': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'recall\': array([1., 0., 0.])}
'''
_CITATION = '''
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} }
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn"):
        score = recall_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight, zero_division=zero_division, )
        return {"recall": float(score) if score.size == 1 else score}
"""simple docstring"""
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    order = 1
    @register_to_config
    def __init__(self, num_train_timesteps=1000, trained_betas=None) -> None:
        '''simple docstring'''
        self.set_timesteps(num_train_timesteps)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []
    def set_timesteps(self, num_inference_steps, device=None) -> None:
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)
        self.ets = []
    def step(self, model_output, timestep, sample, return_dict=True) -> Union[SchedulerOutput, Tuple]:
        '''simple docstring'''
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler")
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def scale_model_input(self, sample, *args, **kwargs) -> torch.FloatTensor:
        '''simple docstring'''
        return sample
    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        '''simple docstring'''
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample
def __len__( self ) -> int:
'''simple docstring'''
return self.config.num_train_timesteps
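# Standalone check (editor's addition): the fourth-order multistep weights used
# in step(), (55, -59, 37, -9)/24, are the Adams-Bashforth 4 coefficients; for
# a constant history they must combine to exactly 1.
def _ab4_combination(history):
    return (1 / 24) * (55 * history[-1] - 59 * history[-2] + 37 * history[-3] - 9 * history[-4])

assert abs(_ab4_combination([1.0, 1.0, 1.0, 1.0]) - 1.0) < 1e-12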
'''simple docstring'''
import functools
def min_distance_up_bottom(worda: str, wordb: str) -> int:
    len_worda = len(worda)
    len_wordb = len(wordb)
    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if first word index is overflowing - delete everything from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflowing - delete everything from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),
            1 + min_distance(indexa, indexb + 1),
            diff + min_distance(indexa + 1, indexb + 1),
        )
    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
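# Editor's check: the classic Levenshtein example — turning "intention" into
# "execution" takes 5 edits, which the memoised recursion above reproduces.
if __name__ == "__main__":
    assert min_distance_up_bottom("intention", "execution") == 5
    assert min_distance_up_bottom("", "abc") == 3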
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    '''simple docstring'''
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(values))])
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                ))
    return topk_filled_outputs
__lowercase = CamembertTokenizer.from_pretrained("""camembert-base""")
__lowercase = CamembertForMaskedLM.from_pretrained("""camembert-base""")
model.eval()
__lowercase = """Le camembert est <mask> :)"""
print(fill_mask(masked_input, model, tokenizer, topk=3))
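# Hedged alternative (editor's addition): the same result via the high-level
# transformers pipeline API instead of the manual top-k decoding above.
def _demo_fill_mask_pipeline():
    from transformers import pipeline
    fill = pipeline("fill-mask", model="camembert-base")
    return fill("Le camembert est <mask> :)", top_k=3)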
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
"""simple docstring"""
    model_type = "gpt_neox"
    def __init__(self, vocab_size=50432, hidden_size=6144, num_hidden_layers=44, num_attention_heads=64, intermediate_size=24576, hidden_act="gelu", rotary_pct=0.25, rotary_emb_base=10000, attention_dropout=0.0, hidden_dropout=0.0, classifier_dropout=0.1, max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=0, eos_token_id=2, tie_word_embeddings=False, use_parallel_residual=True, rope_scaling=None, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!")
    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
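# Editor's sketch: exercising the rope_scaling validation above with the config
# class defined in this module.
if __name__ == "__main__":
    GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation
    try:
        GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 0.5})
    except ValueError as err:
        print(err)  # factor must be a float > 1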
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase : Optional[Any] = logging.get_logger(__name__)
lowercase : Optional[int] = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class __snake_case ( lowerCAmelCase ):
    model_type = "camembert"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __snake_case ( lowerCAmelCase ):
@property
    def inputs(self):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """simple docstring"""
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{two_pointer([2, 7, 11, 15], 9) = }""")
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    '''simple docstring'''
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)
    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index
            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
            # TODO: add tests on the fact weights are properly loaded
    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})
                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))
    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
    def test_extract_submodules_state_dict(self):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})
        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
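# Hedged sketch (editor's addition, assumes `accelerate` is installed): the
# offload round trip outside unittest — write a state dict to disk, then read
# one tensor back lazily.
def _demo_offload_roundtrip():
    model = ModelForTest()
    with TemporaryDirectory() as tmp_dir:
        offload_state_dict(tmp_dir, model.state_dict())
        loader = OffloadedWeightsLoader(state_dict={}, save_folder=tmp_dir)
        return loader["linear1.weight"].shape  # torch.Size([4, 3])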
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """simple docstring"""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
class DeeBertEncoder(nn.Module):
'''simple docstring'''
def __init__( self , _UpperCAmelCase ):
super().__init__()
snake_case_ = config.output_attentions
snake_case_ = config.output_hidden_states
snake_case_ = nn.ModuleList([BertLayer(_UpperCAmelCase ) for _ in range(config.num_hidden_layers )] )
snake_case_ = nn.ModuleList([BertHighway(_UpperCAmelCase ) for _ in range(config.num_hidden_layers )] )
snake_case_ = [-1 for _ in range(config.num_hidden_layers )]
def UpperCamelCase__ ( self , _UpperCAmelCase ):
if (type(_UpperCAmelCase ) is float) or (type(_UpperCAmelCase ) is int):
for i in range(len(self.early_exit_entropy ) ):
snake_case_ = x
else:
snake_case_ = x
def UpperCamelCase__ ( self , _UpperCAmelCase ):
snake_case_ = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ):
snake_case_ = ()
snake_case_ = ()
snake_case_ = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
snake_case_ = all_hidden_states + (hidden_states,)
snake_case_ = layer_module(
_UpperCAmelCase , _UpperCAmelCase , head_mask[i] , _UpperCAmelCase , _UpperCAmelCase )
snake_case_ = layer_outputs[0]
if self.output_attentions:
snake_case_ = all_attentions + (layer_outputs[1],)
snake_case_ = (hidden_states,)
if self.output_hidden_states:
snake_case_ = current_outputs + (all_hidden_states,)
if self.output_attentions:
snake_case_ = current_outputs + (all_attentions,)
snake_case_ = self.highway[i](_UpperCAmelCase )
# logits, pooled_output
if not self.training:
snake_case_ = highway_exit[0]
snake_case_ = entropy(_UpperCAmelCase )
snake_case_ = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
snake_case_ = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
snake_case_ = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(_UpperCAmelCase , i + 1 )
else:
snake_case_ = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
snake_case_ = all_hidden_states + (hidden_states,)
snake_case_ = (hidden_states,)
if self.output_hidden_states:
snake_case_ = outputs + (all_hidden_states,)
if self.output_attentions:
snake_case_ = outputs + (all_attentions,)
snake_case_ = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ", BERT_START_DOCSTRING, )
class DeeBertModel(BertPreTrainedModel):
'''simple docstring'''
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
snake_case_ = config
snake_case_ = BertEmbeddings(_UpperCAmelCase )
snake_case_ = DeeBertEncoder(_UpperCAmelCase )
snake_case_ = BertPooler(_UpperCAmelCase )
self.init_weights()
def UpperCamelCase__ ( self ):
self.encoder.init_highway_pooler(self.pooler )
def UpperCamelCase__ ( self ):
return self.embeddings.word_embeddings
def UpperCamelCase__ ( self , _UpperCAmelCase ):
snake_case_ = value
def UpperCamelCase__ ( self , _UpperCAmelCase ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(_UpperCAmelCase )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def UpperCamelCase__ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
snake_case_ = input_ids.size()
elif inputs_embeds is not None:
snake_case_ = inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
snake_case_ = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
snake_case_ = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
if encoder_attention_mask is None:
snake_case_ = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
if token_type_ids is None:
snake_case_ = torch.zeros(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
snake_case_ = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
snake_case_ = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
snake_case_ = encoder_attention_mask[:, None, None, :]
snake_case_ = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
snake_case_ = (1.0 - encoder_extended_attention_mask) * -10_000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
snake_case_ = self.get_head_mask(_UpperCAmelCase , self.config.num_hidden_layers )
snake_case_ = self.embeddings(
input_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase )
snake_case_ = self.encoder(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
snake_case_ = encoder_outputs[0]
snake_case_ = self.pooler(_UpperCAmelCase )
snake_case_ = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = message
snake_case_ = exit_layer # start from 1!
class BertHighway(nn.Module):
'''simple docstring'''
def __init__( self , _UpperCAmelCase ):
super().__init__()
snake_case_ = BertPooler(_UpperCAmelCase )
snake_case_ = nn.Dropout(config.hidden_dropout_prob )
snake_case_ = nn.Linear(config.hidden_size , config.num_labels )
def UpperCamelCase__ ( self , _UpperCAmelCase ):
# Pooler
snake_case_ = encoder_outputs[0]
snake_case_ = self.pooler(_UpperCAmelCase )
# "return" pooler_output
# BertModel
snake_case_ = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
snake_case_ = bmodel_output[1]
snake_case_ = self.dropout(_UpperCAmelCase )
snake_case_ = self.classifier(_UpperCAmelCase )
return logits, pooled_output
@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n    also takes care of multi-layer training. ", BERT_START_DOCSTRING, )
class DeeBertForSequenceClassification(BertPreTrainedModel):
'''simple docstring'''
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
snake_case_ = config.num_labels
snake_case_ = config.num_hidden_layers
snake_case_ = DeeBertModel(_UpperCAmelCase )
snake_case_ = nn.Dropout(config.hidden_dropout_prob )
snake_case_ = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def UpperCamelCase__ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=-1 , _UpperCAmelCase=False , ):
snake_case_ = self.num_layers
try:
snake_case_ = self.bert(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
snake_case_ = outputs[1]
snake_case_ = self.dropout(_UpperCAmelCase )
snake_case_ = self.classifier(_UpperCAmelCase )
snake_case_ = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
snake_case_ = e.message
snake_case_ = e.exit_layer
snake_case_ = outputs[0]
if not self.training:
snake_case_ = entropy(_UpperCAmelCase )
snake_case_ = []
snake_case_ = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
snake_case_ = MSELoss()
snake_case_ = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ = CrossEntropyLoss()
snake_case_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
snake_case_ = []
for highway_exit in outputs[-1]:
snake_case_ = highway_exit[0]
if not self.training:
highway_logits_all.append(_UpperCAmelCase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
snake_case_ = MSELoss()
snake_case_ = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
snake_case_ = CrossEntropyLoss()
snake_case_ = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(_UpperCAmelCase )
if train_highway:
snake_case_ = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
snake_case_ = (loss,) + outputs
if not self.training:
snake_case_ = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
snake_case_ = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
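# Standalone sketch (editor's addition): the entropy-threshold early-exit rule
# used above — exit at the first layer whose prediction entropy is low enough.
def _should_exit(logits, threshold):
    probs = torch.softmax(logits, dim=-1)
    ent = -(probs * torch.log(probs + 1e-12)).sum(dim=-1)
    return bool((ent < threshold).all())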
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0):
    """simple docstring"""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
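# Editor's check: the standard normal density at 0 is 1/sqrt(2*pi) ≈ 0.39894.
if __name__ == "__main__":
    assert abs(gaussian(0) - 0.3989422804014327) < 1e-12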
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def snake_case ( self ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(32, 64) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=_snake_case , )
_lowerCAmelCase = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_snake_case , set_alpha_to_one=_snake_case , )
torch.manual_seed(0 )
_lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
_lowerCAmelCase = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
_lowerCAmelCase = ClapTextModelWithProjection(_snake_case )
_lowerCAmelCase = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=77 )
_lowerCAmelCase = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=16000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=_snake_case , )
_lowerCAmelCase = SpeechTaHifiGan(_snake_case )
_lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""vocoder""": vocoder,
}
return components
def snake_case ( self , _snake_case , _snake_case=0 ):
"""simple docstring"""
if str(_snake_case ).startswith("""mps""" ):
_lowerCAmelCase = torch.manual_seed(_snake_case )
else:
_lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
_lowerCAmelCase = {
"""prompt""": """A hammer hitting a wooden surface""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
}
return inputs
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = audioldm_pipe(**_snake_case )
_lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 256
_lowerCAmelCase = audio[:10]
_lowerCAmelCase = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        text_inputs = audioldm_pipe.tokenizer(
            prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
        text_inputs = text_inputs["input_ids"].to(torch_device)
        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs, )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)
        inputs["prompt_embeds"] = prompt_embeds
        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]
        assert np.abs(audio_1 - audio_2).max() < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
            text_inputs = text_inputs["input_ids"].to(torch_device)
            text_embeds = audioldm_pipe.text_encoder(
                text_inputs, )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)
            embeds.append(text_embeds)
        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds
        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]
        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios
        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios
        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(
            prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032

    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array(
            [-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
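# Context for the magic numbers above (added note): the released AudioLDM
# checkpoint vocodes at 16 kHz, so the 81920-sample waveforms checked by the
# slow tests correspond to 81920 / 16000 = 5.12 s of audio -- the pipeline's
# default audio_length_in_s.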
| 82
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
lowerCAmelCase_ : str = logging.get_logger(__name__)
class UpperCamelCase_ ( a_ ):
def __init__( self , *snake_case__ , **snake_case__ ) -> None:
"""simple docstring"""
warnings.warn(
"""The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DonutImageProcessor instead.""" , snake_case__ , )
super().__init__(*snake_case__ , **snake_case__ )
| 248
|
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
return (data["data"], data["target"])
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(lowerCAmelCase , lowerCAmelCase )
# Predict target for test data
UpperCAmelCase = xgb.predict(lowerCAmelCase )
UpperCAmelCase = predictions.reshape(len(lowerCAmelCase ) , 1 )
return predictions
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase = fetch_california_housing()
UpperCAmelCase , UpperCAmelCase = data_handling(lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = train_test_split(
lowerCAmelCase , lowerCAmelCase , test_size=0.25 , random_state=1 )
UpperCAmelCase = xgboost(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Error printing
print(F'''Mean Absolute Error : {mean_absolute_error(lowerCAmelCase , lowerCAmelCase )}''' )
print(F'''Mean Square Error : {mean_squared_error(lowerCAmelCase , lowerCAmelCase )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
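# Minimal usage sketch of the helpers above on synthetic data (illustrative
# values; runs without downloading the California housing dataset):
#
#   rng = np.random.default_rng(0)
#   x = rng.normal(size=(200, 3))
#   y = x @ np.array([1.0, -2.0, 0.5]) + rng.normal(scale=0.1, size=200)
#   x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=1)
#   preds = xgboost(x_train, y_train, x_test)
#   print(mean_absolute_error(y_test, preds))  # should land near the 0.1 noise scale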
| 248
| 1
|
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
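# Usage sketch (hypothetical values): preprocessing a single image end to end.
#
#   import numpy as np
#   image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#   processor = CLIPImageProcessor()
#   batch = processor(images=image, return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 224, 224): resized, cropped, rescaled, normalized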
| 13
|
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
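# Illustration of the chaining idea above (hypothetical, standalone sketch):
# colliding keys share one slot, and appendleft keeps the newest value in front.
#
#   from collections import deque
#   buckets = [None] * 4
#   for key, value in [(1, "a"), (5, "b")]:   # 1 % 4 == 5 % 4 == 1 -> collision
#       slot = key % len(buckets)
#       buckets[slot] = deque() if buckets[slot] is None else buckets[slot]
#       buckets[slot].appendleft(value)
#   buckets[1]  # deque(['b', 'a'])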
| 156
| 0
|
import unittest

import numpy as np

from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roformer.modeling_flax_roformer import (
        FlaxRoFormerForMaskedLM,
        FlaxRoFormerForMultipleChoice,
        FlaxRoFormerForQuestionAnswering,
        FlaxRoFormerForSequenceClassification,
        FlaxRoFormerForTokenClassification,
        FlaxRoFormerModel,
    )


class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
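# Usage sketch mirroring the integration test above (downloads a checkpoint
# when run; checkpoint name comes from the test itself):
#
#   from transformers import FlaxRoFormerForMaskedLM
#   model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
#   logits = model(jnp.array([[0, 1, 2, 3, 4, 5]]))[0]  # shape (1, 6, 50000)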
| 351
|
import warnings

from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor


logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of"
            " Transformers. Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 278
| 0
|
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits in num! (Project Euler problem 20)."""
    return sum(int(x) for x in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
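# Quick sanity check: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so
#   assert solution(10) == 27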
| 226
|
from __future__ import annotations


class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list) -> Node:
    """Create a linked list from the given sequence and return its head."""
    if not elements_list:
        raise Exception("The Elements List is empty")

    head = Node(elements_list[0])
    current = head
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    """Recursively print the elements of the list in reverse order."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
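# Caveat and alternative (added sketch): print_reverse recurses once per node,
# so lists longer than Python's recursion limit (about 1000 frames by default)
# raise RecursionError. An explicit stack sidesteps that; the name below is
# illustrative and not part of the original module.
def print_reverse_iterative(head_node: Node) -> None:
    stack = []
    while head_node is not None:
        stack.append(head_node.data)
        head_node = head_node.next
    while stack:
        print(stack.pop())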
| 226
| 1
|
import mpmath  # for roots of unity
import numpy as np


class FFT:
    """Fast polynomial multiplication via a radix-2 fast Fourier transform."""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        # Discrete Fourier transform of A or B
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]

        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root

            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        # Multiply the DFTs of A and B, then invert to recover A*B
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2

        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__(self):
        # Show A, B and A*B as polynomials (index i is the power of x)
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )
        return f"{a}\n{b}\n{c}"


# Unit tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
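# Worked example (illustrative): multiplying A(x) = 1 + 2x by B(x) = 3 + 4x
# should give 3 + 10x + 8x^2, so the product coefficients come back as
# (3+0j), (10+0j), (8+0j) up to the 8-decimal rounding applied above:
#
#   print(FFT([1, 2], [3, 4]).product)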
| 358
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _A ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] , __UpperCAmelCase : Dict , __UpperCAmelCase : Tuple=7 , __UpperCAmelCase : List[str]=3 , __UpperCAmelCase : int=18 , __UpperCAmelCase : int=30 , __UpperCAmelCase : Optional[int]=400 , __UpperCAmelCase : int=True , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : Union[str, Any]=True , ):
a : Optional[int] = size if size is not None else {"height": 18, "width": 18}
a : Any = parent
a : int = batch_size
a : str = num_channels
a : Dict = image_size
a : Dict = min_resolution
a : Optional[int] = max_resolution
a : Optional[int] = do_resize
a : Any = size
a : Dict = apply_ocr
def __snake_case ( self : Optional[int]):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _A ( _a ,unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __snake_case ( self : List[Any]):
a : Optional[int] = LayoutLMvaImageProcessingTester(self)
@property
def __snake_case ( self : Optional[int]):
return self.image_processor_tester.prepare_image_processor_dict()
def __snake_case ( self : List[Any]):
a : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(__UpperCAmelCase , "do_resize"))
self.assertTrue(hasattr(__UpperCAmelCase , "size"))
self.assertTrue(hasattr(__UpperCAmelCase , "apply_ocr"))
def __snake_case ( self : str):
a : Dict = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"height": 18, "width": 18})
a : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=42)
self.assertEqual(image_processor.size , {"height": 42, "width": 42})
def __snake_case ( self : Union[str, Any]):
pass
def __snake_case ( self : List[str]):
# Initialize image_processing
a : Any = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
a : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image)
# Test not batched input
a : str = image_processing(image_inputs[0] , return_tensors="pt")
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , __UpperCAmelCase)
self.assertIsInstance(encoding.boxes , __UpperCAmelCase)
# Test batched
a : Dict = image_processing(__UpperCAmelCase , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __snake_case ( self : Union[str, Any]):
# Initialize image_processing
a : List[str] = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
a : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray)
# Test not batched input
a : Dict = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
a : List[str] = image_processing(__UpperCAmelCase , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __snake_case ( self : List[str]):
# Initialize image_processing
a : str = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
a : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase)
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor)
# Test not batched input
a : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
a : List[str] = image_processing(__UpperCAmelCase , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __snake_case ( self : List[str]):
# with apply_OCR = True
a : List[Any] = LayoutLMvaImageProcessor()
from datasets import load_dataset
a : List[str] = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test")
a : int = Image.open(ds[0]["file"]).convert("RGB")
a : Dict = image_processing(__UpperCAmelCase , return_tensors="pt")
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224))
self.assertEqual(len(encoding.words) , len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
a : Tuple = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
a : str = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 
643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
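# Note on the integration test above: apply_ocr routes every image through the
# Tesseract binary via pytesseract, and the expected words/boxes are pinned to
# Tesseract 4.1.1 as the inline comment says; other Tesseract versions can
# tokenize the scan slightly differently and fail the list comparison.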
| 226
| 0
|
from queue import Queue
from typing import TYPE_CHECKING, Optional


if TYPE_CHECKING:
    from ..models.auto import AutoTokenizer


class BaseStreamer:
    """Base class from which `.generate()` streamers should inherit."""

    def put(self, value):
        """Called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    """Simple text streamer that prints tokens to stdout as soon as entire words are formed."""

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Receives tokens, decodes them, and prints them to stdout as soon as they form entire words."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flushes any remaining cache and signals the end of the stream."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Checks whether `cp` is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode blocks.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):
            return True

        return False


class TextIteratorStreamer(TextStreamer):
    """Streamer that stores print-ready text in a queue, to be consumed by a downstream application as an iterator."""

    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
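# Usage sketch (model name is just an example; downloads weights when run):
#
#   from threading import Thread
#   from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
#
#   tok = AutoTokenizer.from_pretrained("gpt2")
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
#   streamer = TextIteratorStreamer(tok)
#   thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20))
#   thread.start()
#   for new_text in streamer:   # consume decoded text as it is produced
#       print(new_text, end="")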
| 267
|
import unittest

from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax

from .test_modeling_common_flax import FlaxModelTesterMixin


if is_flax_available():
    import jax


@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 267
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase__ = {'configuration_van': ['VAN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VanConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
'VanForImageClassification',
'VanModel',
'VanPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
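# For context (illustrative, not how _LazyModule is implemented): the same
# deferred-import effect can be sketched with module-level __getattr__ from
# PEP 562 -- attribute access triggers the real import on first use:
#
#   def __getattr__(name):
#       if name in _import_structure.get("modeling_van", []):
#           from . import modeling_van
#           return getattr(modeling_van, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")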
| 355
|
"""simple docstring"""
def _UpperCAmelCase ( __lowerCamelCase : float , __lowerCamelCase : float ) -> float:
if mass < 0:
raise ValueError('''The mass of a body cannot be negative''' )
return 0.5 * mass * abs(__lowerCamelCase ) * abs(__lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
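# Worked check: a 10 kg body at +/-10 m/s carries 0.5 * 10 * 10**2 = 500 J, so
#   kinetic_energy(10, 10) == kinetic_energy(10, -10) == 500.0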
| 40
| 0
|
import importlib.metadata
from typing import Union

from packaging.version import Version, parse

from .constants import STR_OPERATION_TO_FUNC


torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compare a library version (or version object) against a requirement with the given operation string."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """Compare the currently installed PyTorch version against `version` with the given operation."""
    return compare_versions(torch_version, operation, version)
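# Usage sketch: gate a feature on the installed torch version. Assuming
# STR_OPERATION_TO_FUNC maps ">=" to operator.ge, this is equivalent to
# operator.ge(parse(installed_torch), parse("1.12.0")).
#
#   if is_torch_version(">=", "1.12.0"):
#       ...  # safe to use APIs introduced in torch 1.12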
| 248
|
import math
import time

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics


if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
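# Sketch of the hook this trainer expects (names illustrative): the
# post-processing step turns raw start/end logits into answer strings keyed by
# example id, which compute_metrics (e.g. a squad metric) can then score.
#
#   def post_processing_function(examples, features, predictions, stage="eval"):
#       # map (start_logits, end_logits) back to text spans per example
#       return EvalPrediction(predictions=formatted_predictions, label_ids=references)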
| 248
| 1
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 152
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import torch


class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
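# Usage sketch: this formatter is what backs the "torch" format in datasets:
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("torch")
#   ds[0]["x"]   # tensor([1., 2.]), float32 via the default dtype logic above
#   ds[:2]["x"]  # same-shape rows are stacked into one (2, 2) tensor by _consolidate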
| 152
| 1
|
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
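# For reference: the search above finds 4150, 4151, 54748, 92727, 93084 and
# 194979 -- the numbers in the searched range equal to the sum of the fifth
# powers of their digits -- so solution() returns their sum, 443839.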
| 278
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __UpperCamelCase ( _A ):
lowerCAmelCase_ = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
lowerCAmelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
lowerCAmelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
lowerCAmelCase_ = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowerCAmelCase_ = [3, 3, 3, 3]
lowerCAmelCase_ = [5, 5, 5, 5]
elif "fl4" in model_name:
lowerCAmelCase_ = [4, 4, 4, 4]
lowerCAmelCase_ = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowerCAmelCase_ = [3, 3, 3, 3]
if "lrf" in model_name:
lowerCAmelCase_ = [3, 3, 3, 3]
else:
lowerCAmelCase_ = [2, 2, 2, 2]
if "tiny" in model_name:
lowerCAmelCase_ = 96
elif "small" in model_name:
lowerCAmelCase_ = 96
elif "base" in model_name:
lowerCAmelCase_ = 128
elif "large" in model_name:
lowerCAmelCase_ = 192
elif "xlarge" in model_name:
lowerCAmelCase_ = 256
elif "huge" in model_name:
lowerCAmelCase_ = 352
# set label information
lowerCAmelCase_ = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
lowerCAmelCase_ = '''imagenet-22k-id2label.json'''
else:
lowerCAmelCase_ = '''imagenet-1k-id2label.json'''
lowerCAmelCase_ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) )
lowerCAmelCase_ = {int(_A ): v for k, v in idalabel.items()}
lowerCAmelCase_ = {v: k for k, v in idalabel.items()}
lowerCAmelCase_ = FocalNetConfig(
embed_dim=_A , depths=_A , focal_levels=_A , focal_windows=_A , use_conv_embed=_A , idalabel=_A , labelaid=_A , use_post_layernorm=_A , use_layerscale=_A , )
return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
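# Example invocation (a sketch; the script filename below is assumed, and fetching
# the original checkpoint requires network access):
#
#   python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny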
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_owlvit""": [
"""OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""OwlViTConfig""",
"""OwlViTOnnxConfig""",
"""OwlViTTextConfig""",
"""OwlViTVisionConfig""",
],
"""processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""OwlViTFeatureExtractor"""]
_UpperCAmelCase = ["""OwlViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
"""OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OwlViTModel""",
"""OwlViTPreTrainedModel""",
"""OwlViTTextModel""",
"""OwlViTVisionModel""",
"""OwlViTForObjectDetection""",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
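# Effect of the lazy module above, as a usage sketch: importing the package stays
# cheap, and heavy backends only load on first attribute access, e.g.
#
#   from transformers.models.owlvit import OwlViTConfig  # light, no torch needed
#   from transformers.models.owlvit import OwlViTModel   # now loads modeling_owlvit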
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/longformer-base-4096""": 4096,
"""allenai/longformer-large-4096""": 4096,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4096,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4096,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode strings, so that a
    byte-level BPE vocabulary avoids whitespace/control symbols.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LongformerTokenizer(PreTrainedTokenizer):
    """Constructs a Longformer tokenizer (byte-level BPE, derived from the RoBERTa/GPT-2 tokenizer)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
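# Minimal byte-level BPE round trip (a usage sketch; downloading the vocab needs
# network access, and `from_pretrained` comes from the PreTrainedTokenizer base class):
#
#   tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
#   ids = tokenizer("Hello world")["input_ids"]
#   assert tokenizer.decode(ids, skip_special_tokens=True).strip() == "Hello world"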
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")

        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
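# User-facing form of the cascade exercised above (a sketch; requires a CUDA GPU and
# network access to the DeepFloyd checkpoints):
#
#   from diffusers import IFPipeline
#   pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
#   pipe.enable_model_cpu_offload()
#   image = pipe("anime turtle", num_inference_steps=50).images[0]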
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__A =logging.get_logger(__name__)
class ByT5Tokenizer(PreTrainedTokenizer):
    """Constructs a ByT5 tokenizer, which tokenizes directly on raw utf-8 bytes."""

    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}
    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids: List[int]):
        """Do not add eos again if the user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of single-byte string tokens."""
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    # ByT5Tokenizer has no vocab file to save
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        return ()
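# Byte-level intuition (a small added example): ids are raw utf-8 byte values shifted
# by the three leading special tokens (pad=0, eos=1, unk=2), so for ASCII "hi":
#
#   tok = ByT5Tokenizer()
#   assert tok("hi")["input_ids"] == [ord("h") + 3, ord("i") + 3, 1]   # [107, 108, 1]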
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        """Instantiate an AlignConfig from an ALIGN text configuration and an ALIGN vision configuration."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
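# Composition sketch: build the joint config from its two halves and round-trip it
# through a plain dict (both calls are defined above):
#
#   config = AlignConfig.from_text_vision_configs(AlignTextConfig(), AlignVisionConfig())
#   assert config.to_dict()["model_type"] == "align"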
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("""""", """|""", """|"""),
datarow=DataRow("""""", """|""", """|"""),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []

no_error_payload = {
    "type": "section",
    "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True},
}

payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]
total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f'{line["duration"]:.4f}'
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
lowerCamelCase__ = """"""
lowerCamelCase__ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)
if os.environ.get("""TEST_TYPE""", """""") != "":
from slack_sdk import WebClient
lowerCamelCase__ = WebClient(token=os.environ["""SLACK_API_TOKEN"""])
if message != "No failed tests! 🤗":
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": message,
},
}
payload.append(md_report)
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": """*For more details:*""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {
"""type""": """plain_text""",
"""text""": """Check Action results""",
"""emoji""": True,
},
"""url""": F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
lowerCamelCase__ = {
"""type""": """context""",
"""elements""": [
{
"""type""": """plain_text""",
"""text""": F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
lowerCamelCase__ = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload)
lowerCamelCase__ = response.data["""ts"""]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
lowerCamelCase__ = """"""
for i, row in enumerate(test_failures):
if row[0] != test_class:
lowerCamelCase__ = row[0]
else:
lowerCamelCase__ = """"""
lowerCamelCase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel="""#accelerate-ci-daily""",
thread_ts=ts,
blocks=[payload],
)
'''simple docstring'''
def simplify(current_set: list[list]) -> list[list]:
    """
    Recursively reduce the augmented matrix toward row echelon form.

    >>> simplify([[1, 2, 3], [4, 5, 6]])
    [[1.0, 2.0, 3.0], [0.0, 0.75, 1.5]]
    """
    # Divide each row by the magnitude of its first term --> creates a 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, final_set[0])
        final_set = resultant
    return final_set
def solve_simultaneous(equations: list[list]) -> list:
    """
    Solve n simultaneous linear equations given as n lists of length n+1.

    >>> solve_simultaneous([[1, 2, 3], [4, 5, 6]])
    [-1.0, 2.0]
    """
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
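    # Expected output of the demo above, worked by hand: each equation reads
    # x_i + (x_1 + ... + x_5) = 3 + i, so the solutions sum to 5 and x_i = rhs_i - 5:
    #   [-1.0, 0.0, 1.0, 2.0, 3.0]
    # and [[4, 2]] encodes 4x = 2, giving [0.5].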
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
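# End-to-end usage sketch (assumes a local `data.parquet` file exists; the "parquet"
# packaged loader dispatches to this builder):
#
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files="data.parquet", split="train")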
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def UpperCAmelCase_ ( self )-> Any:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , tf.keras.layers.Layer ) )
    def UpperCAmelCase_ ( self )-> str:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def UpperCAmelCase_ ( self )-> Dict:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def UpperCAmelCase_ ( self )-> Union[str, Any]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def UpperCAmelCase_ ( self )-> Tuple:
        '''simple docstring'''
        np.random.seed(2 )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            model = model_class(config )
            inputs = self._prepare_for_class(inputs_dict , model_class )
            outputs_dict = model(inputs , noise=noise )
            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict , model_class ) )
            outputs_keywords = model(**inputs_keywords , noise=noise )
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()
            self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
    def UpperCAmelCase_ ( self )-> Optional[int]:
        '''simple docstring'''
        np.random.seed(2 )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        def prepare_numpy_arrays(inputs_dict ):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v ):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v )
            return inputs_np_dict
        for model_class in self.all_model_classes:
            model = model_class(config )
            inputs = self._prepare_for_class(inputs_dict , model_class )
            inputs_np = prepare_numpy_arrays(inputs )
            output_for_dict_input = model(inputs_np , noise=noise )
            output_for_kw_input = model(**inputs , noise=noise )
            self.assert_outputs_same(output_for_dict_input , output_for_kw_input )
    def check_pt_tf_models( self , tf_model , pt_model , tf_inputs_dict )-> str:
        '''simple docstring'''
        np.random.seed(2 )
        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        tf_noise = tf.constant(noise )
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise
        super().check_pt_tf_models(tf_model , pt_model , tf_inputs_dict )
    def UpperCAmelCase_ ( self )-> Dict:
        '''simple docstring'''
        np.random.seed(2 )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__ ),)
            for module_member_name in dir(module )
            if module_member_name.endswith('MainLayer' )
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )]
            for module_member in (getattr(module , module_member_name ),)
            if isinstance(module_member , type )
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member , '_keras_serializable' , False )
        }
        num_patches = int((config.image_size // config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        noise = tf.convert_to_tensor(noise )
        inputs_dict.update({'noise': noise} )
        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config )
            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
            }
            model = tf.keras.Model(symbolic_inputs , outputs=main_layer(symbolic_inputs ) )
            outputs = model(inputs_dict )
            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname , 'keras_model.h5' )
                model.save(filepath )
                model = tf.keras.models.load_model(
                    filepath , custom_objects={main_layer_class.__name__: main_layer_class} )
                assert isinstance(model , tf.keras.Model )
                after_outputs = model(inputs_dict )
                self.assert_outputs_same(after_outputs , outputs )
    @slow
    def UpperCAmelCase_ ( self )-> Dict:
        '''simple docstring'''
        np.random.seed(2 )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            model = model_class(config )
            model_input = self._prepare_for_class(inputs_dict , model_class )
            outputs = model(model_input , noise=noise )
            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2 )] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2 )] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname , saved_model=False )
                model = model_class.from_pretrained(tmpdirname )
                after_outputs = model(model_input , noise=noise )
                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1 )] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1 )] = 0
                max_diff = np.amax(np.abs(out_1 - out_2 ) )
                self.assertLessEqual(max_diff , 1e-5 )
    def UpperCAmelCase_ ( self )-> Optional[Any]:
        '''simple docstring'''
        np.random.seed(2 )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            model = model_class(config )
            model_input = self._prepare_for_class(inputs_dict , model_class )
            outputs = model(model_input , noise=noise )
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config )
            new_model = model_class.from_config(model.get_config() )
            # make sure it also accepts a normal config
            new_model = model_class.from_config(model.config )
            _ = new_model(model_input )  # Build model
            new_model.set_weights(model.get_weights() )
            after_outputs = new_model(model_input , noise=noise )
            self.assert_outputs_same(after_outputs , outputs )
    @unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.' )
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
pass
    @slow
    def UpperCAmelCase_ ( self )-> Any:
        '''simple docstring'''
        model = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' )
        self.assertIsNotNone(model )
def A_( ):
UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase):
    @cached_property
    def default_image_processor( self )-> List[str]:
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
    @slow
    def UpperCAmelCase_ ( self )-> Dict:
        '''simple docstring'''
        np.random.seed(2 )
        model = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='tf' )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        noise = np.random.uniform(size=(1, num_patches) )
        # forward pass
        outputs = model(**inputs , noise=noise )
        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768] )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.convert_to_tensor(
            [[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
        tf.debugging.assert_near(outputs.logits[0, :3, :3] , expected_slice , atol=1e-4 )
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = 'MobileNetV1Config'
# Base docstring
_CHECKPOINT_FOR_DOC = 'google/mobilenet_v1_1.0_224'
_EXPECTED_OUTPUT_SHAPE = [1, 10_24, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'google/mobilenet_v1_1.0_224'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'
MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'google/mobilenet_v1_1.0_224',
    'google/mobilenet_v1_0.75_192',
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map( model , config , tf_weights=None):
    tf_to_pt_map = {}
    if isinstance(model , MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model
    prefix = 'MobilenetV1/Conv2d_0/'
    tf_to_pt_map[prefix + 'weights'] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + 'BatchNorm/beta'] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + 'BatchNorm/gamma'] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + 'BatchNorm/moving_mean'] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + 'BatchNorm/moving_variance'] = backbone.conv_stem.normalization.running_var
    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2
        pointer = backbone.layer[pt_index]
        prefix = f'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
        tf_to_pt_map[prefix + 'depthwise_weights'] = pointer.convolution.weight
        tf_to_pt_map[prefix + 'BatchNorm/beta'] = pointer.normalization.bias
        tf_to_pt_map[prefix + 'BatchNorm/gamma'] = pointer.normalization.weight
        tf_to_pt_map[prefix + 'BatchNorm/moving_mean'] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + 'BatchNorm/moving_variance'] = pointer.normalization.running_var
        pointer = backbone.layer[pt_index + 1]
        prefix = f'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
        tf_to_pt_map[prefix + 'weights'] = pointer.convolution.weight
        tf_to_pt_map[prefix + 'BatchNorm/beta'] = pointer.normalization.bias
        tf_to_pt_map[prefix + 'BatchNorm/gamma'] = pointer.normalization.weight
        tf_to_pt_map[prefix + 'BatchNorm/moving_mean'] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + 'BatchNorm/moving_variance'] = pointer.normalization.running_var
    if isinstance(model , MobileNetVaForImageClassification):
        prefix = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
        tf_to_pt_map[prefix + 'weights'] = model.classifier.weight
        tf_to_pt_map[prefix + 'biases'] = model.classifier.bias
    return tf_to_pt_map
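# Illustrative entries of the resulting map (assuming the standard TF slim checkpoint
# layout for MobileNetV1; shown only as a sketch of what the builder produces):
#   'MobilenetV1/Conv2d_0/weights'                  -> conv_stem.convolution.weight
#   'MobilenetV1/Conv2d_1_depthwise/BatchNorm/beta' -> layer[0].normalization.bias
#   'MobilenetV1/Logits/Conv2d_1c_1x1/weights'      -> classifier.weight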
def load_tf_weights_in_mobilenet_va( model , config , tf_checkpoint_path):
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
            'https://www.tensorflow.org/install/ for installation instructions.')
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f'''Loading TF weight {name} with shape {shape}''')
        array = tf.train.load_variable(tf_checkpoint_path , name)
        tf_weights[name] = array
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model , config , tf_weights)
    for name, pointer in tf_to_pt_map.items():
        logger.info(f'''Importing {name}''')
        if name not in tf_weights:
            logger.info(f'''{name} not in tf pre-trained weights, skipping''')
            continue
        array = tf_weights[name]
        if "depthwise_weights" in name:
            logger.info('Transposing depthwise')
            array = np.transpose(array , (2, 3, 0, 1))
        elif "weights" in name:
            logger.info('Transposing')
            if len(pointer.shape) == 2: # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array , (3, 2, 0, 1))
        if pointer.shape != array.shape:
            raise ValueError(f'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''')
        logger.info(f'''Initialize PyTorch weight {name} {array.shape}''')
        pointer.data = torch.from_numpy(array)
        tf_weights.pop(name , None)
        tf_weights.pop(name + '/RMSProp' , None)
        tf_weights.pop(name + '/RMSProp_1' , None)
        tf_weights.pop(name + '/ExponentialMovingAverage' , None)
    logger.info(f'''Weights not copied to PyTorch model: {", ".join(tf_weights.keys())}''')
    return model
def apply_tf_padding( features : torch.Tensor , conv_layer : nn.Conv2d):
    in_height , in_width = features.shape[-2:]
    stride_height , stride_width = conv_layer.stride
    kernel_height , kernel_width = conv_layer.kernel_size
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height , 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height) , 0)
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width , 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width) , 0)
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features , padding , 'constant' , 0.0)
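# Illustrative arithmetic for the helper above: a 7x7 feature map entering a 3x3
# convolution with stride 2 gives pad_along_height = max(3 - (7 % 2), 0) = 2, i.e.
# one row of zeros on top and one on the bottom, matching TensorFlow "SAME" padding.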
class MobileNetVaConvLayer( nn.Module):
    def __init__( self , config , in_channels , out_channels , kernel_size , stride = 1 , groups = 1 , bias = False , use_normalization = True , use_activation = True , )-> None:
        '''simple docstring'''
        super().__init__()
        self.config = config
        if in_channels % groups != 0:
            raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
        if out_channels % groups != 0:
            raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
        self.convolution = nn.Conv2d(
            in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , stride=stride , padding=padding , groups=groups , bias=bias , padding_mode='zeros' , )
        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels , eps=config.layer_norm_eps , momentum=0.9_997 , affine=True , track_running_stats=True , )
        else:
            self.normalization = None
        if use_activation:
            if isinstance(use_activation , str ):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act , str ):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None
    def forward( self , features )-> torch.Tensor:
        '''simple docstring'''
        if self.config.tf_padding:
            features = apply_tf_padding(features , self.convolution )
        features = self.convolution(features )
        if self.normalization is not None:
            features = self.normalization(features )
        if self.activation is not None:
            features = self.activation(features )
        return features
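    # The forward pass above always runs (optional TF-style padding) -> convolution
    # -> batch norm (if enabled) -> activation (if enabled), mirroring the fused
    # Conv/BN/ReLU blocks of the original TensorFlow MobileNetV1.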
class MobileNetVaPreTrainedModel( PreTrainedModel):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = """mobilenet_v1"""
    main_input_name = """pixel_values"""
    supports_gradient_checkpointing = False
    def _init_weights( self , module )-> None:
        '''simple docstring'''
        if isinstance(module , (nn.Linear, nn.Conv2d) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module , nn.BatchNorm2d ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )
MOBILENET_V1_START_DOCSTRING = r'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
MOBILENET_V1_INPUTS_DOCSTRING = r'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`MobileNetV1ImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    """The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.""" , MOBILENET_V1_START_DOCSTRING , )
class MobileNetVaModel( MobileNetVaPreTrainedModel):
    def __init__( self , config , add_pooling_layer = True )-> Union[str, Any]:
        '''simple docstring'''
        super().__init__(config )
        self.config = config
        depth = 32
        out_channels = max(int(depth * config.depth_multiplier ) , config.min_depth )
        self.conv_stem = MobileNetVaConvLayer(
            config , in_channels=config.num_channels , out_channels=out_channels , kernel_size=3 , stride=2 , )
        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        self.layer = nn.ModuleList()
        for i in range(13 ):
            in_channels = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier ) , config.min_depth )
            self.layer.append(
                MobileNetVaConvLayer(
                    config , in_channels=in_channels , out_channels=in_channels , kernel_size=3 , stride=strides[i] , groups=in_channels , ) )
            self.layer.append(
                MobileNetVaConvLayer(
                    config , in_channels=in_channels , out_channels=out_channels , kernel_size=1 , ) )
        self.pooler = nn.AdaptiveAvgPool2d((1, 1) ) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()
    def _prune_heads( self , heads_to_prune )-> Tuple:
        '''simple docstring'''
        raise NotImplementedError
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward( self , pixel_values = None , output_hidden_states = None , return_dict = None , )-> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        '''simple docstring'''
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('You have to specify pixel_values' )
        hidden_states = self.conv_stem(pixel_values )
        all_hidden_states = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer ):
            hidden_states = layer_module(hidden_states )
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        last_hidden_state = hidden_states
        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state ) , start_dim=1 )
        else:
            pooled_output = None
        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=all_hidden_states , )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """ , MOBILENET_V1_START_DOCSTRING , )
class MobileNetVaForImageClassification( MobileNetVaPreTrainedModel):
    def __init__( self , config )-> None:
        '''simple docstring'''
        super().__init__(config )
        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config )
        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels
        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob , inplace=True )
        self.classifier = nn.Linear(last_hidden_size , config.num_labels ) if config.num_labels > 0 else nn.Identity()
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward( self , pixel_values = None , output_hidden_states = None , labels = None , return_dict = None , )-> Union[tuple, ImageClassifierOutputWithNoAttention]:
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mobilenet_va(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(self.dropout(pooled_output ) )
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(
            loss=loss , logits=logits , hidden_states=outputs.hidden_states , )
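# A minimal usage sketch of the classification head above (checkpoint name taken
# from the doc constants; shown only for illustration):
#   model = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v1_1.0_224')
#   logits = model(pixel_values).logits          # shape (batch_size, num_labels)
#   predicted_label = logits.argmax(-1).item()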
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key( old_name, num_meta4D_last_stage ):
    '''simple docstring'''
    new_name = old_name
    if "patch_embed" in old_name:
        _ , layer , param = old_name.split('''.''' )
        if layer == "0":
            new_name = old_name.replace('''0''', '''convolution1''' )
        elif layer == "1":
            new_name = old_name.replace('''1''', '''batchnorm_before''' )
        elif layer == "3":
            new_name = old_name.replace('''3''', '''convolution2''' )
        else:
            new_name = old_name.replace('''4''', '''batchnorm_after''' )
    if "network" in old_name and re.search(R'''\d\.\d''', old_name ):
        two_digit_num = R'''\b\d{2}\b'''
        if bool(re.search(two_digit_num, old_name ) ):
            match = re.search(R'''\d\.\d\d.''', old_name ).group()
        else:
            match = re.search(R'''\d\.\d.''', old_name ).group()
        if int(match[0] ) < 6:
            trimmed_name = old_name.replace(match, '''''' )
            trimmed_name = trimmed_name.replace('''network''', match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
            new_name = '''intermediate_stages.''' + trimmed_name
        else:
            trimmed_name = old_name.replace(match, '''''' )
            if int(match[2] ) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace('''network''', '''meta4D_layers.blocks.''' + match[2] )
            else:
                layer_index = str(int(match[2] ) - num_meta4D_last_stage )
                trimmed_name = trimmed_name.replace('''network''', '''meta3D_layers.blocks.''' + layer_index )
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace('''norm1''', '''layernorm1''' )
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace('''norm2''', '''layernorm2''' )
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace('''fc1''', '''linear_in''' )
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace('''fc2''', '''linear_out''' )
            new_name = '''last_stage.''' + trimmed_name
    elif "network" in old_name and re.search(R'''.\d.''', old_name ):
        new_name = old_name.replace('''network''', '''intermediate_stages''' )
    if "fc" in new_name:
        new_name = new_name.replace('''fc''', '''convolution''' )
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace('''norm1''', '''batchnorm_before''' )
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace('''norm2''', '''batchnorm_after''' )
    if "proj" in new_name:
        new_name = new_name.replace('''proj''', '''projection''' )
    if "dist_head" in new_name:
        new_name = new_name.replace('''dist_head''', '''distillation_classifier''' )
    elif "head" in new_name:
        new_name = new_name.replace('''head''', '''classifier''' )
    elif "patch_embed" in new_name:
        new_name = '''efficientformer.''' + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace('''norm''', '''layernorm''' )
        new_name = '''efficientformer.''' + new_name
    else:
        new_name = '''efficientformer.encoder.''' + new_name
    return new_name
def convert_torch_checkpoint( checkpoint, num_meta4D_last_stage ):
    '''simple docstring'''
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key )
        checkpoint[rename_key(key, num_meta4D_last_stage )] = val
    return checkpoint
def prepare_img():
    '''simple docstring'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url, stream=True ).raw )
    return image
def convert_efficientformer_checkpoint( checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool ):
    '''simple docstring'''
    orig_state_dict = torch.load(checkpoint_path, map_location='''cpu''' )['''model''']
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file )
    model = EfficientFormerForImageClassificationWithTeacher(config )
    model_name = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage )
    model.load_state_dict(new_state_dict )
    model.eval()
    pillow_resamplings = {
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }
    # prepare image
    image = prepare_img()
    image_size = 2_5_6
    crop_size = 2_2_4
    processor = EfficientFormerImageProcessor(
        size={'''shortest_edge''': image_size}, crop_size={'''height''': crop_size, '''width''': crop_size}, resample=pillow_resamplings['''bicubic'''], )
    pixel_values = processor(images=image, return_tensors='''pt''' ).pixel_values
    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings['''bicubic'''] ),
            CenterCrop(crop_size ),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD ),
        ] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )
    assert torch.allclose(original_pixel_values, pixel_values )
    outputs = model(pixel_values )
    logits = outputs.logits
    expected_shape = (1, 1_0_0_0)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9, -0.5_1_2_4, 0.4_1_8_3, -0.6_7_9_3, -1.3_7_7_7, -0.0_8_9_3, -0.7_3_5_8, -2.4_3_2_8] )
        assert torch.allclose(logits[0, :1_0], expected_logits, atol=1e-3 )
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3_1_5_0, -1.5_4_5_6, -1.2_5_5_6, -0.8_4_9_6, -0.7_1_2_7, -0.7_8_9_7, -0.9_7_2_8, -0.3_0_5_2, 0.3_7_5_1, -0.3_1_2_7] )
        assert torch.allclose(logits[0, :1_0], expected_logits, atol=1e-3 )
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0_2_8_3, -1.4_1_3_1, -0.5_6_4_4, -1.3_1_1_5, -0.5_7_8_5, -1.2_0_4_9, -0.7_5_2_8, 0.1_9_9_2, -0.3_8_2_2, -0.0_8_7_8] )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7" )
    # Save Checkpoints
    Path(pytorch_dump_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_path )
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}" )
    processor.save_pretrained(pytorch_dump_path )
    print(f"Processor successfully saved at {pytorch_dump_path}" )
    if push_to_hub:
        print('''Pushing model to the hub...''' )
        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message='''Add model''', use_temp_dir=True, )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message='''Add image processor''', use_temp_dir=True, )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
a_ = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
"""simple docstring"""
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends
if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup
__UpperCAmelCase = logging.get_logger(__name__)
class __a ( FeatureExtractionMixin ):
    def __init__( self , **kwargs ):
        requires_backends(self , ["""bs4"""] )
        super().__init__(**kwargs )
    def xpath_soup( self , element ):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name , recursive=False )
            xpath_tags.append(child.name )
            xpath_subscripts.append(
                0 if 1 == len(siblings ) else next(i for i, s in enumerate(siblings , 1 ) if s is child ) )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
    def get_three_from_single( self , html_string ):
        html_code = BeautifulSoup(html_string , """html.parser""" )
        all_doc_strings = []
        stringaxtag_seq = []
        stringaxsubs_seq = []
        for element in html_code.descendants:
            if type(element ) == bs4.element.NavigableString:
                if type(element.parent ) != bs4.element.Tag:
                    continue
                text_in_this_tag = html.unescape(element ).strip()
                if not text_in_this_tag:
                    continue
                all_doc_strings.append(text_in_this_tag )
                xpath_tags , xpath_subscripts = self.xpath_soup(element )
                stringaxtag_seq.append(xpath_tags )
                stringaxsubs_seq.append(xpath_subscripts )
        if len(all_doc_strings ) != len(stringaxtag_seq ):
            raise ValueError("""Number of doc strings and xtags does not correspond""" )
        if len(all_doc_strings ) != len(stringaxsubs_seq ):
            raise ValueError("""Number of doc strings and xsubs does not correspond""" )
        return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
    def construct_xpath( self , xpath_tags , xpath_subscripts ):
        xpath = """"""
        for tagname, subs in zip(xpath_tags , xpath_subscripts ):
            xpath += F'/{tagname}'
            if subs != 0:
                xpath += F'[{subs}]'
        return xpath
    def __call__( self , html_strings ):
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings , str ):
            valid_strings = True
        elif isinstance(html_strings , (list, tuple) ):
            if len(html_strings ) == 0 or isinstance(html_strings[0] , str ):
                valid_strings = True
        if not valid_strings:
            raise ValueError(
                """HTML strings must be of type `str`, `List[str]` (batch of examples), """
                F'but is of type {type(html_strings )}.' )
        is_batched = bool(isinstance(html_strings , (list, tuple) ) and (isinstance(html_strings[0] , str )) )
        if not is_batched:
            html_strings = [html_strings]
        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings , stringaxtag_seq , stringaxsubs_seq = self.get_three_from_single(html_string )
            nodes.append(all_doc_strings )
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings , stringaxtag_seq , stringaxsubs_seq ):
                xpath_string = self.construct_xpath(tag_list , sub_list )
                xpath_strings.append(xpath_string )
            xpaths.append(xpath_strings )
        # return as Dict
        data = {"""nodes""": nodes, """xpaths""": xpaths}
        encoded_inputs = BatchFeature(data=data , tensor_type=None )
        return encoded_inputs
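# An illustrative xpath construction (hypothetical values): tags ['html', 'body', 'div']
# with subscripts [0, 0, 2] produce '/html/body/div[2]' via `construct_xpath` above,
# since a subscript of 0 marks an only child and is omitted from the path.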
def __UpperCamelCase ( input_str : str ) -> bool:
    '''simple docstring'''
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch )
        ch_bit_index_on = pow(2 , ch_unicode )
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
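# Illustration (hypothetical calls): __UpperCamelCase('abc') -> True, because each
# character sets a distinct bit in the bitmap, while __UpperCamelCase('aba') -> False,
# because the bit for 'a' is already set when 'a' is seen the second time.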
if __name__ == "__main__":
import doctest
doctest.testmod()
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 , dataset_name="my_dataset" )} ),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 )} ),
SplitDict({"train": SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list( split_dict : SplitDict ):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict ) == len(split_dict_yaml_list )
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info" , [SplitInfo(), SplitInfo(dataset_name=None ), SplitInfo(dataset_name="my_dataset" )] )
def test_split_dict_asdict_has_dataset_name( split_info ):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({"train": split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
A_ : List[Any] = logging.get_logger(__name__)
class Conversation :
    '''simple docstring'''
    def __init__( self , text = None , conversation_id = None , past_user_inputs=None , generated_responses=None ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text
    def __eq__( self , other ):
        if not isinstance(other , Conversation ):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )
    def add_user_input( self , text , overwrite = False ):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
                    F"""with: \"{text}\".""" )
                self.new_user_input = text
            else:
                logger.warning(
                    F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
                    F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
        else:
            self.new_user_input = text
    def mark_processed( self ):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input )
            self.new_user_input = None
    def append_response( self , response ):
        self.generated_responses.append(response )
    def iter_texts( self ):
        for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input
    def __repr__( self ):
        output = F"""Conversation id: {self.uuid} \n"""
        for is_user, text in self.iter_texts():
            name = """user""" if is_user else """bot"""
            output += F"""{name} >> {text} \n"""
        return output
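# A minimal usage sketch of the Conversation container above (illustrative only):
#   conversation = Conversation("Hi, how are you?")
#   conversation.mark_processed()                   # move the pending input into history
#   conversation.append_response("I'm fine, thanks!")
#   print(conversation)  # "Conversation id: <uuid>" followed by alternating user/bot lines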
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r'''
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    ''' , )
class ConversationalPipeline (Pipeline ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
    def _sanitize_parameters( self , min_length_for_response=None , minimum_tokens=None , clean_up_tokenization_spaces=None , **generate_kwargs ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params["""min_length_for_response"""] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["""minimum_tokens"""] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["""max_length"""] = generate_kwargs["""max_length"""]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["""clean_up_tokenization_spaces"""] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs )
        return preprocess_params, forward_params, postprocess_params
    def __call__( self , conversations , num_workers=0 , **kwargs ):
        outputs = super().__call__(conversations , num_workers=num_workers , **kwargs )
        if isinstance(outputs , list ) and len(outputs ) == 1:
            return outputs[0]
        return outputs
    def preprocess( self , conversation , min_length_for_response=32 ):
        if not isinstance(conversation , Conversation ):
            raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
        if conversation.new_user_input is None:
            raise ValueError(
                F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
                """Add user inputs with the conversation's `add_user_input` method""" )
        if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation )
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation )
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids] )
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids] )
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward( self , model_inputs , minimum_tokens=10 , **generate_kwargs ):
        max_length = generate_kwargs.get("""max_length""" , self.model.config.max_length )
        n = model_inputs["""input_ids"""].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(F"""Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
            trim = max_length - minimum_tokens
            model_inputs["""input_ids"""] = model_inputs["""input_ids"""][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["""attention_mask"""] = model_inputs["""attention_mask"""][:, -trim:]
        conversation = model_inputs.pop("""conversation""" )
        generate_kwargs["""max_length"""] = max_length
        output_ids = self.model.generate(**model_inputs , **generate_kwargs )
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def postprocess( self , model_outputs , clean_up_tokenization_spaces=True ):
        output_ids = model_outputs["""output_ids"""]
        answer = self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
        conversation = model_outputs["""conversation"""]
        conversation.mark_processed()
        conversation.append_response(answer )
        return conversation
    def _legacy_parse_and_tokenize( self , conversation ):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) + [eos_token_id] )
            else:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) )
        if len(input_ids ) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
from __future__ import annotations
def generate_sum_of_subsets_soln( nums : list[int] , max_sum : int ) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums )
    create_state_space_tree(nums , max_sum , num_index , path , result , remaining_nums_sum )
    return result
def create_state_space_tree( nums : list[int] , max_sum : int , num_index : int , path : list[int] , result : list[list[int]] , remaining_nums_sum : int , ) -> None:
    if sum(path ) > max_sum or (remaining_nums_sum + sum(path )) < max_sum:
        return
    if sum(path ) == max_sum:
        result.append(path )
        return
    for index in range(num_index , len(nums ) ):
        create_state_space_tree(
            nums , max_sum , index + 1 , [*path, nums[index]] , result , remaining_nums_sum - nums[index] , )
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
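# Worked example for the values above: with nums = [3, 34, 4, 12, 5, 2] and
# max_sum = 9, the state-space search yields exactly two index-ordered subsets,
# [3, 4, 2] and [4, 5], the only combinations summing to 9.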
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class SCREAMING_SNAKE_CASE__ ( TensorFormatter[Mapping, 'torch.Tensor', Mapping] ):
    def __init__(self , features=None , **torch_tensor_kwargs ):
        """simple docstring"""
        super().__init__(features=features )
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization
    def _consolidate(self , column ):
        """simple docstring"""
        import torch
        if isinstance(column , list ) and column:
            if all(
                isinstance(x , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column ):
                return torch.stack(column )
        return column
    def _tensorize(self , value ):
        """simple docstring"""
        import torch
        if isinstance(value , (str, bytes, type(None)) ):
            return value
        elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            default_dtype = {'''dtype''': torch.int64}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {'''dtype''': torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value , PIL.Image.Image ):
                value = np.asarray(value )
        return torch.tensor(value , **{**default_dtype, **self.torch_tensor_kwargs} )
    def _recursive_tensorize(self , data_struct ):
        """simple docstring"""
        import torch
        # support for torch, tf, jax etc.
        if hasattr(data_struct , '''__array__''' ) and not isinstance(data_struct , torch.Tensor ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct , np.ndarray ):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )
    def recursive_tensorize(self , data_struct ):
        """simple docstring"""
        return map_nested(self._recursive_tensorize , data_struct , map_list=False )
    def format_row(self , pa_table ):
        """simple docstring"""
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )
    def format_column(self , pa_table ):
        """simple docstring"""
        column = self.numpy_arrow_extractor().extract_column(pa_table )
        column = self.python_features_decoder.decode_column(column , pa_table.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column
    def format_batch(self , pa_table ):
        """simple docstring"""
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch
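    # A sketch of the dtype defaults in `_tensorize` above: integer numpy arrays are
    # promoted to torch.int64 and floating arrays to torch.float32, unless a dtype is
    # passed via `torch_tensor_kwargs`, which is merged last and therefore wins.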
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate( model_type, generator_name_or_path, question_encoder_name_or_path, dest_dir, config_name_or_path = None, generator_tokenizer_name_or_path = None, question_encoder_tokenizer_name_or_path = None, ):
    '''simple docstring'''
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path )
    gen_config = AutoConfig.from_pretrained(generator_name_or_path )
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path )
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config )
    rag_model.save_pretrained(dest_dir )
    # Sanity check.
    model_class.from_pretrained(dest_dir )
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path )
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path )
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
UpperCamelCase__ = parser.parse_args()
UpperCamelCase__ = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
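# Example invocation (hypothetical script name, paths and checkpoints, shown only
# for illustration of the argparse interface defined above):
#   python consolidate_rag_checkpoint.py \
#     --model_type rag_token \
#     --generator_name_or_path facebook/bart-large \
#     --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#     --dest ./rag-consolidated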
from math import isclose, sqrt
def next_point( point_x , point_y , incoming_gradient ) -> tuple[float, float, float]:
    """simple docstring"""
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus , point_x ) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
def solution( first_x_coord = 1.4 , first_y_coord = -9.6 ) -> int:
    """simple docstring"""
    num_reflections = 0
    point_x = first_x_coord
    point_y = first_y_coord
    gradient = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x , point_y , gradient = next_point(point_x , point_y , gradient )
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(f'''{solution() = }''')
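# Geometry note for the script above: the mirror is the ellipse 4x^2 + y^2 = 100.
# `next_point` reflects the beam using the double-angle identities (sa and ca play
# the roles of sin 2θ and cos 2θ of the surface normal), then intersects the
# outgoing line with the ellipse; one quadratic root is the current point, the
# other is the next point of incidence.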
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _a ( ABC ):
    @staticmethod
    @abstractmethod
    def lowerCamelCase_ ( parser: ArgumentParser ):
        """simple docstring"""
        raise NotImplementedError()
    @abstractmethod
    def lowerCamelCase_ ( self ):
        """simple docstring"""
        raise NotImplementedError()
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _a ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = ShapEImgaImgPipeline
    params = ['''image''']
    batch_params = ['''image''']
    required_optional_params = [
        '''num_images_per_prompt''',
        '''num_inference_steps''',
        '''generator''',
        '''latents''',
        '''guidance_scale''',
        '''frame_size''',
        '''output_type''',
        '''return_dict''',
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size( self ):
        """simple docstring"""
        return 32
    @property
    def time_input_dim( self ):
        """simple docstring"""
        return 32
    @property
    def time_embed_dim( self ):
        """simple docstring"""
        return self.time_input_dim * 4
    @property
    def renderer_dim( self ):
        """simple docstring"""
        return 8
    @property
    def dummy_image_encoder( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
        model = CLIPVisionModel(config )
        return model
    @property
    def dummy_image_processor( self ):
        """simple docstring"""
        image_processor = CLIPImageProcessor(
            crop_size=224 , do_center_crop=True , do_normalize=True , do_resize=True , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
        return image_processor
    @property
    def dummy_prior( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        model_kwargs = {
            '''num_attention_heads''': 2,
            '''attention_head_dim''': 16,
            '''embedding_dim''': self.time_input_dim,
            '''num_embeddings''': 32,
            '''embedding_proj_dim''': self.text_embedder_hidden_size,
            '''time_embed_dim''': self.time_embed_dim,
            '''num_layers''': 1,
            '''clip_embed_dim''': self.time_input_dim * 2,
            '''additional_embeddings''': 0,
            '''time_embed_act_fn''': '''gelu''',
            '''norm_in_type''': '''layer''',
            '''embedding_proj_norm_type''': '''layer''',
            '''encoder_hid_proj_type''': None,
            '''added_emb_type''': None,
        }
        model = PriorTransformer(**model_kwargs )
        return model
    @property
    def dummy_renderer( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        model_kwargs = {
            '''param_shapes''': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            '''d_latent''': self.time_input_dim,
            '''d_hidden''': self.renderer_dim,
            '''n_output''': 12,
            '''background''': (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs )
        return model
    def get_dummy_components( self ):
        """simple docstring"""
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='''exp''' , num_train_timesteps=1_024 , prediction_type='''sample''' , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
            '''prior''': prior,
            '''image_encoder''': image_encoder,
            '''image_processor''': image_processor,
            '''renderer''': renderer,
            '''scheduler''': scheduler,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        input_image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''image''': input_image,
            '''generator''': generator,
            '''num_inference_steps''': 1,
            '''frame_size''': 32,
            '''output_type''': '''np''',
        }
        return inputs
    def test_shap_e(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e_img2img(self):
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_img2img_out.npy' )
        pipe = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img')
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image, generator=generator, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type='np', ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 93
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class UpperCAmelCase_ (PretrainedConfig):
    """simple docstring"""
    model_type = "swinv2"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
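# Hedged usage sketch: with the defaults above, the derived channel dimension after
# the last stage is embed_dim * 2 ** (num_stages - 1) = 96 * 2**3 = 768. The import
# below assumes this file maps onto transformers' public Swinv2Config API.
if __name__ == "__main__":
    from transformers import Swinv2Config  # assumed public API, not imported above

    cfg = Swinv2Config()
    assert cfg.hidden_size == int(cfg.embed_dim * 2 ** (len(cfg.depths) - 1)) == 768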
| 35
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCamelCase_ = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 251
| 0
|
'''simple docstring'''
def greatest_common_divisor(a: int, b: int) -> int:
    '''Recursive Euclidean algorithm; the result is always non-negative.'''
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    '''Iterative Euclidean algorithm.'''
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main():
    '''Read two comma-separated integers and print their GCD both ways.'''
    try:
        nums = input('Enter two integers separated by comma (,): ').split(',')
        num_a = int(nums[0])
        num_b = int(nums[1])
        print(
            f'greatest_common_divisor({num_a}, {num_b}) = '
            f'{greatest_common_divisor(num_a, num_b)}' )
        print(f'By iterative gcd({num_a}, {num_b}) = {gcd_by_iterative(num_a, num_b)}')
    except (IndexError, UnboundLocalError, ValueError):
        print('Wrong input')
if __name__ == "__main__":
main()
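# Hedged usage sketch exercising both implementations above; the values here are
# illustrative, not taken from the source.
assert greatest_common_divisor(121, 11) == 11
assert gcd_by_iterative(121, 11) == 11
assert gcd_by_iterative(-4, 6) == 2  # abs() strips the sign of the result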
| 362
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet'] = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet_fast'] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlnet'] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlnet'] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 179
| 0
|
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class __A ( unittest.TestCase ):
"""simple docstring"""
    def test_find_backend(self):
        simple_backend = find_backend(' if not is_torch_available():')
        self.assertEqual(simple_backend, 'torch')
        # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend(' if not (is_torch_available() and is_transformers_available()):')
        self.assertEqual(double_backend, 'torch_and_transformers')
        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            ' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):' )
        self.assertEqual(triple_backend, 'torch_and_transformers_and_onnx')
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('torch', objects)
        self.assertIn('torch_and_transformers', objects)
        self.assertIn('flax_and_transformers', objects)
        self.assertIn('torch_and_transformers_and_onnx', objects)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn('UNet2DModel', objects['torch'])
        self.assertIn('FlaxUNet2DConditionModel', objects['flax'])
        self.assertIn('StableDiffusionPipeline', objects['torch_and_transformers'])
        self.assertIn('FlaxStableDiffusionPipeline', objects['flax_and_transformers'])
        self.assertIn('LMSDiscreteScheduler', objects['torch_and_scipy'])
        self.assertIn('OnnxStableDiffusionPipeline', objects['torch_and_transformers_and_onnx'])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object('CONSTANT', "'torch'")
        self.assertEqual(dummy_constant, '\nCONSTANT = None\n')
        dummy_function = create_dummy_object('function', "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n" )
        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object('FakeClass', "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
'''
        dummy_files = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']})
        self.assertEqual(dummy_files['torch'], expected_dummy_pytorch_file)
| 81
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
_lowerCamelCase : str = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE (ChineseCLIPImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        warnings.warn(
            'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ChineseCLIPImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
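# Hedged migration sketch: the deprecated shim above only forwards to the image
# processor, so new code is expected to construct the replacement directly, e.g.:
#   from transformers import ChineseCLIPImageProcessor
#   image_processor = ChineseCLIPImageProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
# (the checkpoint name is illustrative, not taken from this file)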
| 28
| 0
|
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
__UpperCAmelCase = "base_with_context"
def load_notes_encoder(weights, model):
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ), requires_grad=__lowerCamelCase )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f'layers_{lyr_num}']
SCREAMING_SNAKE_CASE_ = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
        attention_weights = ly_weight['attention']
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def load_continuous_encoder(weights, model):
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ), requires_grad=__lowerCamelCase )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f'layers_{lyr_num}']
        attention_weights = ly_weight['attention']
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def load_decoder(weights, model):
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ), requires_grad=__lowerCamelCase )
SCREAMING_SNAKE_CASE_ = nn.Parameter(
torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f'layers_{lyr_num}']
SCREAMING_SNAKE_CASE_ = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) )
        attention_weights = ly_weight['self_attention']
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
        attention_weights = ly_weight['MultiHeadDotProductAttention_0']
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) )
SCREAMING_SNAKE_CASE_ = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) )
return model
def main(args):
    ta_checkpoint = checkpoints.load_tax_checkpoint(args.checkpoint_path)
    ta_checkpoint = jnp.tree_util.tree_map(onp.array, ta_checkpoint)
    gin_overrides = [
        'from __gin__ import dynamic_registration',
        'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
        'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
        'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
    ]
    gin_file = os.path.join(args.checkpoint_path, '..', 'config.gin')
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)
    scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2', variance_type='fixed_large')
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length['inputs'], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj='gated-gelu', )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length['targets_context'], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj='gated-gelu', )
    decoder = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length['targets_context'], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate, )
    notes_encoder = load_notes_encoder(ta_checkpoint['target']['token_encoder'], notes_encoder)
    continuous_encoder = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'], continuous_encoder)
    decoder = load_decoder(ta_checkpoint['target']['decoder'], decoder)
    melgan = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder')
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument(
"--checkpoint_path",
default=F"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help="Path to the original jax model checkpoint.",
)
args = parser.parse_args()
main(args)
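# Hedged usage sketch (illustrative invocation; the script filename and local
# checkpoint path below are assumptions, not taken from this file):
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path ./base_with_context/checkpoint_500000 \
#       --output_path ./spectrogram-diffusion \
#       --save True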
| 257
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    idalabel = {int(k): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    conv_layer = 'std_conv' if 'bit' in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer, num_labels=1000, idalabel=idalabel, labelaid=labelaid, )
    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace('stem.conv', 'bit.embedder.convolution')
    if "blocks" in name:
        name = name.replace('blocks', 'layers')
    if "head.fc" in name:
        name = name.replace('head.fc', 'classifier.1')
    if name.startswith('norm'):
        name = 'bit.' + name
    if "bit" not in name and "classifier" not in name:
        name = 'bit.encoder.' + name
    return name
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)
    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if 'head' in key else val
    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True, size={'shortest_edge': timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors='pt').pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits
    print('Logits:', logits[0, :3])
    print('Predicted class:', model.config.idalabel[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f'Saving model {model_name} and processor to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f'Pushing model {model_name} and processor to the hub')
        model.push_to_hub(f'ybelkada/{model_name}')
        processor.push_to_hub(f'ybelkada/{model_name}')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
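# Hedged usage sketch (illustrative invocation; the script filename is assumed):
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-50 --push_to_hub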
| 257
| 1
|
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    """simple docstring"""
    ignore_keys = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    """simple docstring"""
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace('moe_layer.experts.0', f'ffn.experts.expert_{expert_idx}')
            else:
                key = key.replace('moe_layer.experts.', 'ffn.experts.expert_')
        if "gate" in key:
            key = key.replace('.moe_layer.gate.wg', '.ffn.router.classifier')
        if "fc2" and "experts" not in key:
            key = key.replace('.fc2.', '.ffn.fc2.')
        if "fc1" and "experts" not in key:
            key = key.replace('.fc1.', '.ffn.fc1.')
        if ".encoder_attn." in key:
            key = key.replace('.encoder_attn.', '.cross_attention.')
        if "encoder_attn_layer_norm" in key:
            key = key.replace('encoder_attn_layer_norm', 'cross_attention_layer_norm')
        if "final_layer_norm" in key:
            key = key.replace('final_layer_norm', 'ff_layer_norm')
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    """simple docstring"""
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f'-rank-{expert}.pt'
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)['model']
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace('.bin', f'-{len(sharded_state_dicts)+1:05d}-of-???.bin'))
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype)
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace('.bin', f'-{len(sharded_state_dicts)+1:05d}-of-???.bin'))
    shared_weights = torch.load(switch_checkpoint_path + '-shared.pt')['model']
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights['shared.weight'] = shared_weights['decoder.embed_tokens.weight']
    sharded_state_dicts.append(shared_weights.keys())
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace('.bin', f'-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin')
        temp_filename = os.path.join(dump_path, weights_name.replace('.bin', f'-{idx+1:05d}-of-???.bin'))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'total_size': total_size}
    index = {'metadata': metadata, 'weight_map': weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), 'w', encoding='utf-8') as f:
        content = json.dumps(index, indent=2, sort_keys=True) + '\n'
        f.write(content)
    return metadata, index
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
args = parser.parse_args()
metadata, index = shard_on_the_fly(
    args.nllb_moe_checkpoint_path,
    args.pytorch_dump_folder_path,
    128,
    args.dtype,
)
config = NllbMoeConfig.from_pretrained(
    "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 238
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
_lowercase : List[str] = logging.get_logger(__name__)
_lowercase : int = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class __SCREAMING_SNAKE_CASE (PretrainedConfig):
    '''simple docstring'''
    model_type = 'deberta-v2'
    def __init__(self, vocab_size=128100, hidden_size=1536, num_hidden_layers=24, num_attention_heads=24, intermediate_size=6144, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=0, initializer_range=0.02, layer_norm_eps=1e-7, relative_attention=False, max_relative_positions=-1, pad_token_id=0, position_biased_input=True, pos_att_type=None, pooler_dropout=0, pooler_hidden_act="gelu", **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split('|')]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get('pooler_hidden_size', hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class __SCREAMING_SNAKE_CASE (OnnxConfig):
'''simple docstring'''
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
        else:
            return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )
@property
    def default_onnx_opset(self) -> int:
        return 12
    def generate_dummy_inputs(self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size: int = -1, seq_length: int = -1, num_choices: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40, tokenizer: "PreTrainedTokenizerBase" = None, ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
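# Hedged usage sketch: with the DeBERTa-v2 defaults above (type_vocab_size=0),
# the ONNX dummy inputs drop token_type_ids entirely. Assumes this file maps
# onto transformers' public DebertaV2Config API.
if __name__ == "__main__":
    from transformers import DebertaV2Config  # assumed public API, not imported above

    cfg = DebertaV2Config()
    assert cfg.type_vocab_size == 0  # hence no token_type_ids in the exported graph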
| 238
| 1
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
_lowercase : List[Any] = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(_lowercase)
class lowerCAmelCase__ (PretrainedConfig):
    model_type = 'rag'
    is_composition = True
    def __init__(self, vocab_size=None, is_encoder_decoder=True, prefix=None, bos_token_id=None, pad_token_id=None, eos_token_id=None, decoder_start_token_id=None, title_sep=" / ", doc_sep=" // ", n_docs=5, max_combined_length=300, retrieval_vector_size=768, retrieval_batch_size=8, dataset="wiki_dpr", dataset_split="train", index_name="compressed", index_path=None, passages_path=None, use_dummy_dataset=False, reduce_loss=False, label_smoothing=0.0, do_deduplication=True, exclude_bos_score=False, do_marginalize=False, output_retrieved=False, use_cache=True, forced_eos_token_id=None, **kwargs, ):
        """simple docstring"""
        super().__init__(
            bos_token_id=bos_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, is_encoder_decoder=is_encoder_decoder, prefix=prefix, vocab_size=vocab_size, **kwargs, )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop('question_encoder')
        question_encoder_model_type = question_encoder_config.pop('model_type')
        decoder_config = kwargs.pop('generator')
        decoder_model_type = decoder_config.pop('model_type')

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, 'forced_eos_token_id', None)
    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs):
        """simple docstring"""
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)
    def to_dict(self):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output['question_encoder'] = self.question_encoder.to_dict()
        output['generator'] = self.generator.to_dict()
        output['model_type'] = self.__class__.model_type
return output
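# Hedged usage sketch: a RAG config is composed from a question-encoder config and a
# generator config. Assumes transformers' public API; the sub-config choices below
# are illustrative, not taken from this file.
if __name__ == "__main__":
    from transformers import BartConfig, DPRConfig, RagConfig  # assumed public API

    rag_config = RagConfig.from_question_encoder_generator_configs(
        DPRConfig(), BartConfig(), n_docs=5)
    assert rag_config.n_docs == 5 and rag_config.is_composition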
| 369
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_lowercase : Any = logging.get_logger(__name__)
class lowerCAmelCase__ (DonutImageProcessor):
    def __init__(self, *args, **kwargs):
        """simple docstring"""
        warnings.warn(
            'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DonutImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
| 264
| 0
|
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase (SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (('num_inference_steps', 50),)
    def get_scheduler_config(self, **kwargs):
        """simple docstring"""
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
        }
        config.update(**kwargs)
        return config
def snake_case ( self , _snake_case=0 , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase = dict(self.forward_default_kwargs )
_lowerCAmelCase = kwargs.pop("""num_inference_steps""" , _snake_case )
_lowerCAmelCase = self.dummy_sample
_lowerCAmelCase = 0.1 * sample
_lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase = self.get_scheduler_config(**_snake_case )
_lowerCAmelCase = scheduler_class(**_snake_case )
scheduler.set_timesteps(_snake_case )
# copy over dummy past residuals
_lowerCAmelCase = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_snake_case )
_lowerCAmelCase = scheduler_class.from_pretrained(_snake_case )
new_scheduler.set_timesteps(_snake_case )
# copy over dummy past residuals
_lowerCAmelCase = dummy_past_residuals[:]
_lowerCAmelCase = scheduler.step_prk(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
_lowerCAmelCase = new_scheduler.step_prk(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
_lowerCAmelCase = scheduler.step_plms(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
_lowerCAmelCase = new_scheduler.step_plms(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case ( self ):
"""simple docstring"""
pass
def snake_case ( self , _snake_case=0 , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase = dict(self.forward_default_kwargs )
_lowerCAmelCase = kwargs.pop("""num_inference_steps""" , _snake_case )
_lowerCAmelCase = self.dummy_sample
_lowerCAmelCase = 0.1 * sample
_lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_snake_case )
scheduler.set_timesteps(_snake_case )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_snake_case )
_lowerCAmelCase = scheduler_class.from_pretrained(_snake_case )
# copy over dummy past residuals
new_scheduler.set_timesteps(_snake_case )
# copy over dummy past residual (must be after setting timesteps)
_lowerCAmelCase = dummy_past_residuals[:]
_lowerCAmelCase = scheduler.step_prk(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
_lowerCAmelCase = new_scheduler.step_prk(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
_lowerCAmelCase = scheduler.step_plms(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
_lowerCAmelCase = new_scheduler.step_plms(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def snake_case ( self , **_snake_case ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(**_snake_case )
_lowerCAmelCase = scheduler_class(**_snake_case )
_lowerCAmelCase = 10
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(_snake_case )
for i, t in enumerate(scheduler.prk_timesteps ):
_lowerCAmelCase = model(_snake_case , _snake_case )
_lowerCAmelCase = scheduler.step_prk(_snake_case , _snake_case , _snake_case ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
_lowerCAmelCase = model(_snake_case , _snake_case )
_lowerCAmelCase = scheduler.step_plms(_snake_case , _snake_case , _snake_case ).prev_sample
return sample
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = dict(self.forward_default_kwargs )
_lowerCAmelCase = kwargs.pop("""num_inference_steps""" , _snake_case )
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_snake_case )
_lowerCAmelCase = self.dummy_sample
_lowerCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(_snake_case , """set_timesteps""" ):
scheduler.set_timesteps(_snake_case )
elif num_inference_steps is not None and not hasattr(_snake_case , """set_timesteps""" ):
_lowerCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_lowerCAmelCase = dummy_past_residuals[:]
_lowerCAmelCase = scheduler.step_prk(_snake_case , 0 , _snake_case , **_snake_case ).prev_sample
_lowerCAmelCase = scheduler.step_prk(_snake_case , 1 , _snake_case , **_snake_case ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
_lowerCAmelCase = scheduler.step_plms(_snake_case , 0 , _snake_case , **_snake_case ).prev_sample
_lowerCAmelCase = scheduler.step_plms(_snake_case , 1 , _snake_case , **_snake_case ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case ( self ):
"""simple docstring"""
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_snake_case )
def snake_case ( self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_snake_case )
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(steps_offset=1 )
_lowerCAmelCase = scheduler_class(**_snake_case )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def snake_case ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=_snake_case , beta_end=_snake_case )
def snake_case ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_snake_case )
def snake_case ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_snake_case )
def snake_case ( self ):
"""simple docstring"""
for t in [1, 5, 10]:
self.check_over_forward(time_step=_snake_case )
def snake_case ( self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=_snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = 27
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase = self.dummy_sample
_lowerCAmelCase = 0.1 * sample
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_snake_case )
scheduler.set_timesteps(_snake_case )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
_lowerCAmelCase = scheduler.step_prk(_snake_case , _snake_case , _snake_case ).prev_sample
def snake_case ( self ):
"""simple docstring"""
with self.assertRaises(_snake_case ):
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_snake_case )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.full_loop()
_lowerCAmelCase = torch.sum(torch.abs(_snake_case ) )
_lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 198.1318 ) < 1e-2
assert abs(result_mean.item() - 0.2580 ) < 1e-3
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.full_loop(prediction_type="""v_prediction""" )
_lowerCAmelCase = torch.sum(torch.abs(_snake_case ) )
_lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 67.3986 ) < 1e-2
assert abs(result_mean.item() - 0.0878 ) < 1e-3
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.full_loop(set_alpha_to_one=_snake_case , beta_start=0.01 )
_lowerCAmelCase = torch.sum(torch.abs(_snake_case ) )
_lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 230.0399 ) < 1e-2
assert abs(result_mean.item() - 0.2995 ) < 1e-3
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.full_loop(set_alpha_to_one=_snake_case , beta_start=0.01 )
_lowerCAmelCase = torch.sum(torch.abs(_snake_case ) )
_lowerCAmelCase = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 186.9482 ) < 1e-2
assert abs(result_mean.item() - 0.2434 ) < 1e-3
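# A minimal, runnable sketch of the scheduler API that the tests above exercise:
# configure a PNDM scheduler, set an inference schedule, and walk a sample through
# it. The shapes and config values are illustrative assumptions, and the random
# "model output" stands in for a real UNet call.
import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000, steps_offset=1, skip_prk_steps=True)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # stand-in for unet(sample, t).sample
    sample = scheduler.step(model_output, t, sample).prev_sample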
'''simple docstring'''
import torch
from transformers import AutoModel


class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Score each query token as a potential entity start/end token."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
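# The forward pass above reduces to scoring every query token against the support
# tokens that carry the entity start/end markers. A standalone sketch of that
# scoring step with plain tensors (all shapes below are illustrative assumptions):
import torch

query_tokens = torch.randn(20, 768)   # one query sentence, 20 token embeddings
support_starts = torch.randn(4, 768)  # embeddings at the support start markers
support_ends = torch.randn(4, 768)    # embeddings at the support end markers

# Sum each query token's affinity to all markers, then normalize over query
# positions to get per-token start/end probabilities, as in forward() above.
p_start = torch.matmul(query_tokens, support_starts.T).sum(1).softmax(0)
p_end = torch.matmul(query_tokens, support_ends.T).sum(1).softmax(0)
print(p_start.shape, p_end.shape)  # torch.Size([20]) torch.Size([20])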
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
    'Salesforce/blip-vqa-capfilt-large': (
        'https://huggingface.co/Salesforce/blip-vqa-capfilt-large/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
    'Salesforce/blip-itm-base-flickr': 'https://huggingface.co/Salesforce/blip-itm-base-flickr/resolve/main/config.json',
    'Salesforce/blip-itm-large-flickr': (
        'https://huggingface.co/Salesforce/blip-itm-large-flickr/resolve/main/config.json'
    ),
}
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = "blip_text_model"
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : List[Any]=30_524 , SCREAMING_SNAKE_CASE__ : Tuple=768 , SCREAMING_SNAKE_CASE__ : Tuple=768 , SCREAMING_SNAKE_CASE__ : str=3_072 , SCREAMING_SNAKE_CASE__ : Tuple=768 , SCREAMING_SNAKE_CASE__ : int=12 , SCREAMING_SNAKE_CASE__ : int=8 , SCREAMING_SNAKE_CASE__ : List[str]=512 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : str=1e-1_2 , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Tuple=0.0 , SCREAMING_SNAKE_CASE__ : List[str]=0.02 , SCREAMING_SNAKE_CASE__ : int=30_522 , SCREAMING_SNAKE_CASE__ : Dict=2 , SCREAMING_SNAKE_CASE__ : int=0 , SCREAMING_SNAKE_CASE__ : Dict=102 , SCREAMING_SNAKE_CASE__ : Optional[int]=True , SCREAMING_SNAKE_CASE__ : Dict=True , **SCREAMING_SNAKE_CASE__ : str , ) -> List[str]:
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , sep_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = encoder_hidden_size
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = projection_dim
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = is_decoder
lowerCAmelCase__ = use_cache
@classmethod
def a ( cls : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ , lowerCAmelCase__ = cls.get_config_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
lowerCAmelCase__ = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = "blip_vision_model"
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : List[str]=768 , SCREAMING_SNAKE_CASE__ : str=3_072 , SCREAMING_SNAKE_CASE__ : Any=512 , SCREAMING_SNAKE_CASE__ : Dict=12 , SCREAMING_SNAKE_CASE__ : Optional[int]=12 , SCREAMING_SNAKE_CASE__ : int=384 , SCREAMING_SNAKE_CASE__ : int=16 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Optional[Any]=1e-5 , SCREAMING_SNAKE_CASE__ : Dict=0.0 , SCREAMING_SNAKE_CASE__ : Tuple=1e-1_0 , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> Union[str, Any]:
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = projection_dim
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = image_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = attention_dropout
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = hidden_act
@classmethod
def a ( cls : Any , SCREAMING_SNAKE_CASE__ : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE__ : int ) -> "PretrainedConfig":
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ , lowerCAmelCase__ = cls.get_config_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
lowerCAmelCase__ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = "blip"
snake_case__ = True
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Any=512 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2.6_592 , SCREAMING_SNAKE_CASE__ : str=256 , **SCREAMING_SNAKE_CASE__ : Dict , ) -> List[Any]:
super().__init__(**SCREAMING_SNAKE_CASE__ )
if text_config is None:
lowerCAmelCase__ = {}
logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." )
if vision_config is None:
lowerCAmelCase__ = {}
logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." )
lowerCAmelCase__ = BlipTextConfig(**SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = BlipVisionConfig(**SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = self.vision_config.hidden_size
lowerCAmelCase__ = projection_dim
lowerCAmelCase__ = logit_scale_init_value
lowerCAmelCase__ = 1.0
lowerCAmelCase__ = 0.02
lowerCAmelCase__ = image_text_hidden_size
@classmethod
def a ( cls : Tuple , SCREAMING_SNAKE_CASE__ : BlipTextConfig , SCREAMING_SNAKE_CASE__ : BlipVisionConfig , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> str:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **SCREAMING_SNAKE_CASE__ )
def a ( self : int ) -> Union[str, Any]:
lowerCAmelCase__ = copy.deepcopy(self.__dict__ )
lowerCAmelCase__ = self.text_config.to_dict()
lowerCAmelCase__ = self.vision_config.to_dict()
lowerCAmelCase__ = self.__class__.model_type
return output
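# The composite-config pattern above (a text config plus a vision config nested
# inside one top-level config) mirrors the public BLIP classes transformers
# ships. A short round-trip sketch with those classes; the override values are
# arbitrary examples:
from transformers import BlipConfig, BlipTextConfig, BlipVisionConfig

text_config = BlipTextConfig(num_hidden_layers=6)
vision_config = BlipVisionConfig(image_size=224)
config = BlipConfig.from_text_vision_configs(text_config, vision_config)

# to_dict() re-nests the sub-configs so the composite can be serialized and
# later restored with BlipConfig.from_dict(...).
serialized = config.to_dict()
assert serialized["text_config"]["num_hidden_layers"] == 6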
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def a ( self : Any , SCREAMING_SNAKE_CASE__ : Any ) -> int:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ):
lowerCAmelCase__ = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def a ( self : Optional[Any] ) -> Any:
lowerCAmelCase__ = "sshleifer/tiny-gpt2"
lowerCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : int ) -> Optional[Any]:
lowerCAmelCase__ = "sgugger/tiny-distilbert-classification"
lowerCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE__ , only_pretrain_model=SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : Optional[Any] ) -> int:
lowerCAmelCase__ = "sshleifer/tiny-gpt2"
lowerCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , torchscript=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def a ( self : Dict ) -> Optional[Any]:
lowerCAmelCase__ = "sshleifer/tiny-gpt2"
lowerCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , fpaa=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : Union[str, Any] ) -> Tuple:
lowerCAmelCase__ = "sshleifer/tiny-gpt2"
lowerCAmelCase__ = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
# set architectures equal to `None`
lowerCAmelCase__ = None
lowerCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ , configs=[config] )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : Any ) -> Optional[Any]:
lowerCAmelCase__ = "sshleifer/tiny-gpt2"
lowerCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == "cpu" , "Can't do half precision" )
def a ( self : int ) -> Dict:
lowerCAmelCase__ = "sshleifer/tiny-gpt2"
lowerCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=SCREAMING_SNAKE_CASE__ , multi_process=SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def a ( self : Optional[int] ) -> Union[str, Any]:
lowerCAmelCase__ = "sshleifer/tiny-gpt2"
lowerCAmelCase__ = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ , configs=[config] )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : Optional[Any] ) -> Optional[Any]:
lowerCAmelCase__ = "sshleifer/tinier_bart"
lowerCAmelCase__ = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ , configs=[config] )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a ( self : List[str] ) -> Dict:
lowerCAmelCase__ = "sshleifer/tiny-gpt2"
lowerCAmelCase__ = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ , configs=[config] )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def a ( self : Optional[int] ) -> Optional[int]:
lowerCAmelCase__ = "sshleifer/tinier_bart"
lowerCAmelCase__ = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ , configs=[config] )
lowerCAmelCase__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def a ( self : List[Any] ) -> Optional[int]:
lowerCAmelCase__ = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , save_to_csv=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(SCREAMING_SNAKE_CASE__ , "inf_time.csv" ) , train_memory_csv_file=os.path.join(SCREAMING_SNAKE_CASE__ , "train_mem.csv" ) , inference_memory_csv_file=os.path.join(SCREAMING_SNAKE_CASE__ , "inf_mem.csv" ) , train_time_csv_file=os.path.join(SCREAMING_SNAKE_CASE__ , "train_time.csv" ) , env_info_csv_file=os.path.join(SCREAMING_SNAKE_CASE__ , "env.csv" ) , multi_process=SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ )
benchmark.run()
self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE__ , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE__ , "train_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE__ , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE__ , "train_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE__ , "env.csv" ) ).exists() )
def a ( self : Optional[Any] ) -> Any:
lowerCAmelCase__ = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(SCREAMING_SNAKE_CASE__ : List[Any] ):
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "sequential" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "cumulative" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "current" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=SCREAMING_SNAKE_CASE__ , inference=SCREAMING_SNAKE_CASE__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(SCREAMING_SNAKE_CASE__ , "log.txt" ) , log_print=SCREAMING_SNAKE_CASE__ , trace_memory_line_by_line=SCREAMING_SNAKE_CASE__ , multi_process=SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = PyTorchBenchmark(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(SCREAMING_SNAKE_CASE__ , "log.txt" ) ).exists() )
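# What the assertions above check, invoked directly: a benchmark run returns
# nested {model: {"result": {batch_size: {sequence_length: value}}}} dicts for
# time and memory. This uses the same (since-deprecated) transformers benchmark
# API and the same tiny checkpoint as the tests:
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

benchmark_args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = PyTorchBenchmark(benchmark_args).run()
print(results.time_inference_result)
print(results.memory_inference_result)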
'''simple docstring'''
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]

if TYPE_CHECKING:
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
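# The init module above combines two ideas: optional-dependency gating and lazy
# submodule loading. A stdlib-only sketch of the gating half: find_spec probes
# whether a backend is installed without importing it, so the import structure
# only advertises symbols whose backends are actually available.
import importlib.util

_sketch_import_structure = {"configuration": ["Config"]}

if importlib.util.find_spec("torch") is not None:
    # only advertise torch-backed symbols when torch is importable
    _sketch_import_structure["modeling"] = ["Model", "PreTrainedModel"]

print(_sketch_import_structure)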
"""simple docstring"""
import json
import os
import tempfile

import datasets

from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 50000
SMALL_TEST = 5000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
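# `get_duration` above comes from a local utils module that is not shown. A
# plausible stand-in (an assumption, not the repo's actual helper) is a
# decorator that returns the wall-clock duration of the wrapped call:
import time
from functools import wraps


def get_duration_sketch(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start

    return wrapper


@get_duration_sketch
def busy_loop(n):
    for _ in range(n):
        pass


print(f"{busy_loop(1_000_000):.4f}s")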
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ : List[str] = logging.get_logger(__name__)
lowerCamelCase_ : Dict = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : Dict = """poolformer"""
def __init__( self : List[Any] , snake_case_ : str=3 , snake_case_ : List[str]=16 , snake_case_ : int=16 , snake_case_ : Optional[Any]=3 , snake_case_ : Tuple=4.0 , snake_case_ : Any=[2, 2, 6, 2] , snake_case_ : Union[str, Any]=[64, 128, 320, 512] , snake_case_ : Tuple=[7, 3, 3, 3] , snake_case_ : List[Any]=[4, 2, 2, 2] , snake_case_ : str=[2, 1, 1, 1] , snake_case_ : Dict=4 , snake_case_ : Union[str, Any]=0.0 , snake_case_ : Union[str, Any]="gelu" , snake_case_ : int=True , snake_case_ : str=1e-5 , snake_case_ : Dict=0.02 , **snake_case_ : List[str] , ):
UpperCamelCase_: int = num_channels
UpperCamelCase_: int = patch_size
UpperCamelCase_: Optional[int] = stride
UpperCamelCase_: Optional[Any] = padding
UpperCamelCase_: List[str] = pool_size
UpperCamelCase_: Tuple = hidden_sizes
UpperCamelCase_: Any = mlp_ratio
UpperCamelCase_: List[Any] = depths
UpperCamelCase_: List[Any] = patch_sizes
UpperCamelCase_: Tuple = strides
UpperCamelCase_: Optional[int] = num_encoder_blocks
UpperCamelCase_: Tuple = drop_path_rate
UpperCamelCase_: Any = hidden_act
UpperCamelCase_: Dict = use_layer_scale
UpperCamelCase_: Optional[Any] = layer_scale_init_value
UpperCamelCase_: int = initializer_range
super().__init__(**snake_case_ )
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = version.parse("""1.11""" )
@property
def lowerCAmelCase__ ( self : List[Any] ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
return 2e-3
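# The second class above is an ONNX export config: `inputs` declares the graph
# inputs with their dynamic axes, and the 2e-3 property is the validation
# tolerance used when comparing exported outputs against PyTorch. A short
# sketch using the equivalent public classes transformers ships for PoolFormer
# (assuming that import path exists in the installed version):
from transformers import PoolFormerConfig
from transformers.models.poolformer.configuration_poolformer import PoolFormerOnnxConfig

config = PoolFormerConfig()
onnx_config = PoolFormerOnnxConfig(config)
print(dict(onnx_config.inputs))          # pixel_values with batch/channel/h/w axes
print(onnx_config.atol_for_validation)   # 0.002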
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCamelCase_ : List[str] = False
lowerCamelCase_ : int = logging.get_logger(__name__)
lowerCamelCase_ : Optional[int] = """ybelkada/fonts"""
def A__ ( ) -> Dict:
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
"""Pix2StructImageProcessor. Please upgrade torch.""" )
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[int]:
requires_backends(lowerCamelCase , ["""torch"""] )
_check_torch_version()
UpperCamelCase_: Tuple = image_tensor.unsqueeze(0 )
UpperCamelCase_: Any = torch.nn.functional.unfold(lowerCamelCase , (patch_height, patch_width) , stride=(patch_height, patch_width) )
UpperCamelCase_: int = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , lowerCamelCase , lowerCamelCase , -1 )
UpperCamelCase_: Any = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
def A__ ( lowerCamelCase , lowerCamelCase = 36 , lowerCamelCase = "black" , lowerCamelCase = "white" , lowerCamelCase = 5 , lowerCamelCase = 5 , lowerCamelCase = 5 , lowerCamelCase = 5 , lowerCamelCase = None , lowerCamelCase = None , ) -> Image.Image:
requires_backends(lowerCamelCase , """vision""" )
# Add new lines so that each line is no more than 80 characters.
UpperCamelCase_: List[str] = textwrap.TextWrapper(width=80 )
UpperCamelCase_: Optional[int] = wrapper.wrap(text=lowerCamelCase )
UpperCamelCase_: List[str] = """\n""".join(lowerCamelCase )
if font_bytes is not None and font_path is None:
UpperCamelCase_: List[Any] = io.BytesIO(lowerCamelCase )
elif font_path is not None:
UpperCamelCase_: List[Any] = font_path
else:
UpperCamelCase_: Tuple = hf_hub_download(lowerCamelCase , """Arial.TTF""" )
UpperCamelCase_: Optional[Any] = ImageFont.truetype(lowerCamelCase , encoding="""UTF-8""" , size=lowerCamelCase )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
UpperCamelCase_: str = ImageDraw.Draw(Image.new("""RGB""" , (1, 1) , lowerCamelCase ) )
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Optional[int] = temp_draw.textbbox((0, 0) , lowerCamelCase , lowerCamelCase )
# Create the actual image with a bit of padding around the text.
UpperCamelCase_: Optional[int] = text_width + left_padding + right_padding
UpperCamelCase_: List[str] = text_height + top_padding + bottom_padding
UpperCamelCase_: Union[str, Any] = Image.new("""RGB""" , (image_width, image_height) , lowerCamelCase )
UpperCamelCase_: Optional[Any] = ImageDraw.Draw(lowerCamelCase )
draw.text(xy=(left_padding, top_padding) , text=lowerCamelCase , fill=lowerCamelCase , font=lowerCamelCase )
return image
def A__ ( lowerCamelCase , lowerCamelCase , **lowerCamelCase ) -> List[str]:
requires_backends(lowerCamelCase , """vision""" )
# Convert to PIL image if necessary
UpperCamelCase_: List[str] = to_pil_image(lowerCamelCase )
UpperCamelCase_: Union[str, Any] = render_text(lowerCamelCase , **lowerCamelCase )
UpperCamelCase_: Tuple = max(header_image.width , image.width )
UpperCamelCase_: Tuple = int(image.height * (new_width / image.width) )
UpperCamelCase_: Dict = int(header_image.height * (new_width / header_image.width) )
UpperCamelCase_: str = Image.new("""RGB""" , (new_width, new_height + new_header_height) , """white""" )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
UpperCamelCase_: Optional[Any] = to_numpy_array(lowerCamelCase )
if infer_channel_dimension_format(lowerCamelCase ) == ChannelDimension.LAST:
UpperCamelCase_: Tuple = to_channel_dimension_format(lowerCamelCase , ChannelDimension.LAST )
return new_image
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = ["""flattened_patches"""]
def __init__( self : int , snake_case_ : bool = True , snake_case_ : bool = True , snake_case_ : Dict[str, int] = None , snake_case_ : int = 2048 , snake_case_ : bool = False , **snake_case_ : Any , ):
super().__init__(**snake_case_ )
UpperCamelCase_: int = patch_size if patch_size is not None else {"""height""": 16, """width""": 16}
UpperCamelCase_: Tuple = do_normalize
UpperCamelCase_: List[Any] = do_convert_rgb
UpperCamelCase_: Tuple = max_patches
UpperCamelCase_: Tuple = is_vqa
def lowerCAmelCase__ ( self : int , snake_case_ : np.ndarray , snake_case_ : int , snake_case_ : dict , **snake_case_ : Tuple ):
requires_backends(self.extract_flattened_patches , """torch""" )
_check_torch_version()
# convert to torch
UpperCamelCase_: int = to_channel_dimension_format(snake_case_ , ChannelDimension.FIRST )
UpperCamelCase_: List[str] = torch.from_numpy(snake_case_ )
UpperCamelCase_, UpperCamelCase_: List[Any] = patch_size["""height"""], patch_size["""width"""]
UpperCamelCase_, UpperCamelCase_: Tuple = get_image_size(snake_case_ )
# maximize scale s.t.
UpperCamelCase_: List[Any] = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
UpperCamelCase_: Any = max(min(math.floor(scale * image_height / patch_height ) , snake_case_ ) , 1 )
UpperCamelCase_: List[str] = max(min(math.floor(scale * image_width / patch_width ) , snake_case_ ) , 1 )
UpperCamelCase_: int = max(num_feasible_rows * patch_height , 1 )
UpperCamelCase_: Optional[Any] = max(num_feasible_cols * patch_width , 1 )
UpperCamelCase_: str = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="""bilinear""" , align_corners=snake_case_ , antialias=snake_case_ , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
UpperCamelCase_: List[str] = torch_extract_patches(snake_case_ , snake_case_ , snake_case_ )
UpperCamelCase_: List[Any] = patches.shape
UpperCamelCase_: List[str] = patches_shape[1]
UpperCamelCase_: Optional[Any] = patches_shape[2]
UpperCamelCase_: List[str] = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
UpperCamelCase_: Union[str, Any] = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
UpperCamelCase_: Optional[Any] = torch.arange(snake_case_ ).reshape([rows, 1] ).repeat(1 , snake_case_ ).reshape([rows * columns, 1] )
UpperCamelCase_: Optional[int] = torch.arange(snake_case_ ).reshape([1, columns] ).repeat(snake_case_ , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
        row_ids = row_ids.to(torch.float32 )
        col_ids = col_ids.to(torch.float32 )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
UpperCamelCase_: Optional[Any] = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
UpperCamelCase_: Tuple = torch.nn.functional.pad(snake_case_ , [0, 0, 0, max_patches - (rows * columns)] ).float()
UpperCamelCase_: List[Any] = to_numpy_array(snake_case_ )
return result
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : np.ndarray , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : Tuple ):
        if image.dtype == np.uint8:
            image = image.astype(np.float32 )
# take mean across the whole `image`
UpperCamelCase_: str = np.mean(snake_case_ )
UpperCamelCase_: str = np.std(snake_case_ )
UpperCamelCase_: str = max(snake_case_ , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(snake_case_ , mean=snake_case_ , std=snake_case_ , **snake_case_ )
def lowerCAmelCase__ ( self : str , snake_case_ : ImageInput , snake_case_ : Optional[str] = None , snake_case_ : bool = None , snake_case_ : Optional[bool] = None , snake_case_ : Optional[int] = None , snake_case_ : Optional[Dict[str, int]] = None , snake_case_ : Optional[Union[str, TensorType]] = None , snake_case_ : ChannelDimension = ChannelDimension.FIRST , **snake_case_ : Union[str, Any] , ):
UpperCamelCase_: Tuple = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase_: Tuple = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCamelCase_: Optional[Any] = patch_size if patch_size is not None else self.patch_size
UpperCamelCase_: Optional[int] = max_patches if max_patches is not None else self.max_patches
UpperCamelCase_: Tuple = self.is_vqa
if kwargs.get("""data_format""" , snake_case_ ) is not None:
raise ValueError("""data_format is not an accepted input as the outputs are """ )
UpperCamelCase_: Dict = make_list_of_images(snake_case_ )
if not valid_images(snake_case_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCamelCase_: str = [convert_to_rgb(snake_case_ ) for image in images]
# All transformations expect numpy arrays.
UpperCamelCase_: Union[str, Any] = [to_numpy_array(snake_case_ ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError("""A header text must be provided for VQA models.""" )
UpperCamelCase_: List[Any] = kwargs.pop("""font_bytes""" , snake_case_ )
UpperCamelCase_: List[Any] = kwargs.pop("""font_path""" , snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
UpperCamelCase_: str = [header_text] * len(snake_case_ )
UpperCamelCase_: str = [
render_header(snake_case_ , header_text[i] , font_bytes=snake_case_ , font_path=snake_case_ )
for i, image in enumerate(snake_case_ )
]
if do_normalize:
UpperCamelCase_: Union[str, Any] = [self.normalize(image=snake_case_ ) for image in images]
# convert to torch tensor and permute
UpperCamelCase_: str = [
self.extract_flattened_patches(image=snake_case_ , max_patches=snake_case_ , patch_size=snake_case_ )
for image in images
]
# create attention mask in numpy
        attention_masks = [(image.sum(axis=-1 ) != 0).astype(np.float32 ) for image in images]
        encoded_outputs = BatchFeature(
            data={"""flattened_patches""": images, """attention_mask""": attention_masks} , tensor_type=snake_case_ )
return encoded_outputs
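# The heart of extract_flattened_patches above is torch.nn.functional.unfold,
# which cuts an image into non-overlapping patches in a single call. A
# standalone sketch of just that step (sizes are arbitrary examples):
import torch

image = torch.randn(1, 3, 32, 32)  # (batch, channels, height, width)
patch_h, patch_w = 16, 16

patches = torch.nn.functional.unfold(image, (patch_h, patch_w), stride=(patch_h, patch_w))
# unfold yields (batch, C * patch_h * patch_w, num_patches); transposing gives
# one flattened patch per row, the "flattened_patches" layout used above.
patches = patches.transpose(1, 2)
print(patches.shape)  # torch.Size([1, 4, 768])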
import math


def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if the candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
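# Why the log2 test in check_partition_perfect works: a perfect partition
# 4**t = 2**t + k forces k = 4**t - 2**t, so 4*k + 1 = (2**(t + 1) - 1)**2 is a
# perfect square and sqrt(4k + 1) / 2 + 1 / 2 collapses to exactly 2**t, whose
# log2 is the integer t. A quick numeric check of that identity:
import math

for t in range(1, 6):
    k = 4**t - 2**t
    x = math.sqrt(4 * k + 1) / 2 + 1 / 2
    assert x == 2**t and math.log2(x) == t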
import argparse

import torch
from torch import nn

from transformers import M2M100Config, M2M100ForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location='cpu')
    args = m2m_100['args'] or m2m_100['cfg']['model']
    state_dict = m2m_100['model']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size, max_position_embeddings=10_24, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='relu', )

    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
    parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
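# make_linear_from_emb above is the standard weight-tying move: the LM head is a
# bias-free Linear whose weight is the token-embedding matrix, so logits are
# hidden_states @ embedding.T. A tiny standalone demonstration (sizes arbitrary):
import torch
from torch import nn

emb = nn.Embedding(100, 16)             # vocab_size=100, d_model=16
lm_head = nn.Linear(16, 100, bias=False)
lm_head.weight.data = emb.weight.data   # tie: the same tensor backs both modules

hidden = torch.randn(2, 16)
assert torch.allclose(lm_head(hidden), hidden @ emb.weight.T)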
"""simple docstring"""
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


TEXT = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool('''text-question-answering''')
        self.tool.setup()
        self.remote_tool = load_tool('''text-question-answering''', remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, '''What did Hugging Face do in April 2021?''')
        self.assertEqual(result, '''launched the BigScience Research Workshop''')

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, '''What did Hugging Face do in April 2021?''')
        self.assertEqual(result, '''launched the BigScience Research Workshop''')

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question='''What did Hugging Face do in April 2021?''')
        self.assertEqual(result, '''launched the BigScience Research Workshop''')

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question='''What did Hugging Face do in April 2021?''')
        self.assertEqual(result, '''launched the BigScience Research Workshop''')
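# The same tool invoked outside of unittest, using the agents API the test
# relies on (the short context string here is an illustrative assumption):
from transformers import load_tool

context = "On April 28, 2021, the company launched the BigScience Research Workshop."
qa_tool = load_tool("text-question-answering")
qa_tool.setup()
print(qa_tool(context, "What did the company do in April 2021?"))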
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowercase_ = None
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
lowercase_ = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
lowercase_ = {
"facebook/mbart-large-en-ro": 1_0_2_4,
"facebook/mbart-large-cc25": 1_0_2_4,
}
# fmt: off
lowercase_ = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
__UpperCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Tuple = ['input_ids', 'attention_mask']
__UpperCAmelCase : Optional[Any] = MBartTokenizer
__UpperCAmelCase : List[int] = []
__UpperCAmelCase : List[int] = []
def __init__( self , _a=None , _a=None , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a=None , _a=None , _a=None , **_a , ):
# Mask token behave like a normal word, i.e. include the space before it
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
super().__init__(
vocab_file=_a , tokenizer_file=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , src_lang=_a , tgt_lang=_a , additional_special_tokens=_a , **_a , )
__a = vocab_file
__a = False if not self.vocab_file else True
__a = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
__a = {
lang_code: self.convert_tokens_to_ids(_a ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__a = src_lang if src_lang is not None else '''en_XX'''
__a = self.convert_tokens_to_ids(self._src_lang )
__a = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __UpperCAmelCase ( self ):
return self._src_lang
@src_lang.setter
def __UpperCAmelCase ( self , _a ):
__a = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCAmelCase ( self , _a , _a = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCAmelCase ( self , _a , _a = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self , _a , _a , _a , _a , **_a ):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__a = src_lang
__a = self(_a , add_special_tokens=_a , return_tensors=_a , **_a )
__a = self.convert_tokens_to_ids(_a )
__a = tgt_lang_id
return inputs
def __UpperCAmelCase ( self , _a , _a = "en_XX" , _a = None , _a = "ro_RO" , **_a , ):
__a = src_lang
__a = tgt_lang
return super().prepare_seqaseq_batch(_a , _a , **_a )
def __UpperCAmelCase ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCAmelCase ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCAmelCase ( self , _a ):
__a = self.convert_tokens_to_ids(_a )
__a = []
__a = [self.eos_token_id, self.cur_lang_code]
__a = self.convert_ids_to_tokens(self.prefix_tokens )
__a = self.convert_ids_to_tokens(self.suffix_tokens )
__a = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self , _a ):
__a = self.convert_tokens_to_ids(_a )
__a = []
__a = [self.eos_token_id, self.cur_lang_code]
__a = self.convert_ids_to_tokens(self.prefix_tokens )
__a = self.convert_ids_to_tokens(self.suffix_tokens )
__a = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self , _a , _a = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
return
__a = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
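# How the language-code plumbing above behaves in practice: with a source
# language set, every encoded sequence is suffixed with </s> plus the language
# code, exactly as set_src_lang_special_tokens arranges. A sketch against the
# public tokenizer (downloads the checkpoint named in the maps above):
from transformers import MBartTokenizerFast

tokenizer = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
encoded = tokenizer("UN Chief Says There Is No Plan to Stop War")
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"])[-2:])  # ['</s>', 'en_XX']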
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {'vocab_file': 'sentencepiece.bpe.model'}
UpperCAmelCase_ = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
UpperCAmelCase_ = {
'moussaKam/mbarthez': 1_024,
'moussaKam/barthez': 1_024,
'moussaKam/barthez-orangesum-title': 1_024,
}
UpperCAmelCase_ = '▁'
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Optional[Any] = VOCAB_FILES_NAMES
UpperCAmelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Optional[Any] = ['input_ids', 'attention_mask']
def __init__( self: List[Any] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Dict="<s>" , UpperCamelCase_: str="</s>" , UpperCamelCase_: Tuple="</s>" , UpperCamelCase_: Dict="<s>" , UpperCamelCase_: int="<unk>" , UpperCamelCase_: List[Any]="<pad>" , UpperCamelCase_: Union[str, Any]="<mask>" , UpperCamelCase_: Optional[Dict[str, Any]] = None , **UpperCamelCase_: List[Any] , ):
# Mask token behave like a normal word, i.e. include the space before it
__lowerCamelCase = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token
__lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , )
__lowerCamelCase = vocab_file
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase_ ) )
__lowerCamelCase = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
__lowerCamelCase = len(self.sp_model ) - 1
__lowerCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
__lowerCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def lowerCAmelCase__ ( self: int , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ):
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCAmelCase__ ( self: str ):
return len(self.sp_model )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str ):
return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ )
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Optional[int] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__lowerCamelCase = self.sp_model.PieceToId(UpperCamelCase_ )
return spm_id if spm_id else self.unk_token_id
def lowerCAmelCase__ ( self: str , UpperCamelCase_: int ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: int ):
__lowerCamelCase = []
__lowerCamelCase = """"""
__lowerCamelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCamelCase_ ) + token
__lowerCamelCase = True
__lowerCamelCase = []
else:
current_sub_tokens.append(UpperCamelCase_ )
__lowerCamelCase = False
out_string += self.sp_model.decode(UpperCamelCase_ )
return out_string.strip()
def __getstate__( self: str ):
__lowerCamelCase = self.__dict__.copy()
__lowerCamelCase = None
return state
def __setstate__( self: Optional[int] , UpperCamelCase_: List[Any] ):
__lowerCamelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__lowerCamelCase = {}
__lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase__ ( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ):
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__lowerCamelCase = os.path.join(
UpperCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase_ , """wb""" ) as fi:
__lowerCamelCase = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase_ )
return (out_vocab_file,)
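# The fairseq_tokens_to_ids table above exists because fairseq checkpoints pin
# ids 0..3 to <s>/<pad>/</s>/<unk> in a different order than the sentencepiece
# model. A dependency-free sketch of the lookup, mirroring the fallback in the
# token-to-id method above (the toy sentencepiece vocab is an assumption):
FAIRSEQ_TOKENS_TO_IDS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}


def token_to_id(token, sp_piece_to_id, unk_token_id=3):
    # special tokens bypass the sentencepiece vocab entirely
    if token in FAIRSEQ_TOKENS_TO_IDS:
        return FAIRSEQ_TOKENS_TO_IDS[token]
    # sentencepiece returns 0 for unknown pieces; remap that to fairseq's <unk>
    return sp_piece_to_id.get(token, 0) or unk_token_id


print(token_to_id("<pad>", {"▁hello": 42}))   # 1
print(token_to_id("▁hello", {"▁hello": 42}))  # 42
print(token_to_id("▁oops", {"▁hello": 42}))   # 3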
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Dict = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
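

# timm stores the attention input projection as a single fused qkv matrix of shape
# (3 * hidden_size, hidden_size); read_in_q_k_v above slices it into the separate
# query, key and value blocks (in that order) expected by the HF checkpoint layout.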
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    # standard COCO test image used to verify the conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same", layer_type="bottleneck", depths=(3, 4, 9), out_features=["stage3"], embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1_000)
    base_model = False
# load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
timm_model.eval()
# load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load the ImageNet-1k id2label mapping from the hub
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
snake_case_ : Optional[Any] = ViTHybridModel(_a ).eval()
else:
snake_case_ : Any = ViTHybridForImageClassification(_a ).eval()
model.load_state_dict(_a )
# create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
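    # If the timm transform and the HF image processor ever diverge (e.g. a different
    # resize interpolation), the allclose check above is the first place the
    # conversion will fail, so it is worth keeping even though it looks redundant.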
# verify logits
with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits
print('''Predicted class:''' , logits.argmax(-1 ).item() )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print(f"Pushing model and processor to the hub {vit_name}" )
model.push_to_hub(f"ybelkada/{vit_name}" )
processor.push_to_hub(f"ybelkada/{vit_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_r50_s16_384''',
type=str,
help='''Name of the hybrid ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 264
| 0
|
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase="eval" ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = os.path.join(UpperCamelCase , f"""{split}_results.json""" )
if os.path.exists(UpperCamelCase ):
with open(UpperCamelCase , """r""" ) as f:
return json.load(UpperCamelCase )
raise ValueError(f"""can't find {path}""" )
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
        result = get_results(tmp_dir)
        self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
        result = get_results(tmp_dir, split="test")
        self.assertGreaterEqual(result["test_rouge1"], 10)
        self.assertGreaterEqual(result["test_rouge2"], 2)
        self.assertGreaterEqual(result["test_rougeL"], 7)
        self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
        result = get_results(tmp_dir)
        self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_f1"], 30)
        self.assertGreaterEqual(result["eval_exact"], 30)
| 184
|
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
    @slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
    @slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
| 184
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(self, vocab_size=50_265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-5,
                 position_embedding_type="absolute", block_per_row=4, approx_mode="full",
                 initial_prior_first_n_blocks=0, initial_prior_diagonal_n_blocks=0,
                 pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
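
# Usage sketch (an assumption, following the usual HF convention that config defaults
# mirror the reference checkpoint): MraConfig() should reproduce the configuration of
# the uw-madison/mra-base-512-4 checkpoint referenced in the archive map above.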
| 221
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_chinese_clip": [
"CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ChineseCLIPConfig",
"ChineseCLIPOnnxConfig",
"ChineseCLIPTextConfig",
"ChineseCLIPVisionConfig",
],
"processing_chinese_clip": ["ChineseCLIPProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["ChineseCLIPFeatureExtractor"]
__lowerCamelCase = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
"CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ChineseCLIPModel",
"ChineseCLIPPreTrainedModel",
"ChineseCLIPTextModel",
"ChineseCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
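    # Note: with the lazy module installed, an import like
    # `from transformers.models.chinese_clip import ChineseCLIPModel` defers the
    # torch-dependent module load until the attribute is first accessed.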
| 221
| 1
|
from __future__ import annotations
END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)
trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
trie.insert_word(word)
def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)
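
# Illustrative: with the word tuple above, autocomplete_using_trie("de") yields
# ("depart ", "detergent ", "deer ", "deal ") -- each completion keeps the trailing
# space that _elements substitutes for the end-of-word marker.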
def main() -> None:
    print(autocomplete_using_trie("de"))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 360
|
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30,
                 max_resolution=400, size=None, do_normalize=True, do_convert_rgb=True, patch_size=None):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input: VQA mode requires header text
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))
    def test_call_pil_four_channels(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (images are converted to RGB, so one channel is dropped)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim)
            )
| 184
| 0
|
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
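
# `formatted_as` temporarily switches the dataset's output format (numpy, pandas,
# torch, tensorflow, ...), so the formatted reads above also measure the cost of
# converting Arrow-backed rows into each framework's containers.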
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
| 168
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_x_clip''': [
'''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XCLIPConfig''',
'''XCLIPTextConfig''',
'''XCLIPVisionConfig''',
],
'''processing_x_clip''': ['''XCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
'''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XCLIPModel''',
'''XCLIPPreTrainedModel''',
'''XCLIPTextModel''',
'''XCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 223
| 0
|
"""simple docstring"""
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
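
# Illustrative check (the moduli must be coprime): the system x ≡ 1 (mod 5), x ≡ 3 (mod 7)
# has the unique solution 31 in [0, 35), so chinese_remainder_theorem(5, 1, 7, 3) == 31.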
def invert_modulo(a: int, n: int) -> int:
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
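
# Same example via modular inverses: invert_modulo(5, 7) == 3 and invert_modulo(7, 5) == 3,
# giving n = 3*3*5 + 1*3*7 = 66 and 66 % 35 == 31, so chinese_remainder_theorem2(5, 1, 7, 3) == 31.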
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
| 360
|
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
AcceleratorState._reset_state()
| 253
| 0
|
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = '\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n'
class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 11
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)
    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)
    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})
    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)
            new_config = GenerationConfig.from_pretrained(tmp_dir)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config
    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
| 11
| 1
|
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
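
# Illustrative invocation (script and checkpoint names are examples, not requirements):
#   python consolidate_rag_checkpoint.py --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-checkpoint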
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
    args = parser.parse_args()

    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 204
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
__lowerCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
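# Hedged usage sketch of the lazy init above: attribute access on the package
# resolves through _LazyModule, so torch/vision are only imported on first
# use. The class names come from the import structure; an installed torch is
# an assumption here.
#   from transformers import EfficientFormerConfig, EfficientFormerModel
#   model = EfficientFormerModel(EfficientFormerConfig())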
| 204
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class _lowercase ( unittest.TestCase):
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int=7 , __lowerCamelCase : Union[str, Any]=3 , __lowerCamelCase : Any=10 , __lowerCamelCase : List[Any]=18 , __lowerCamelCase : List[Any]=30 , __lowerCamelCase : Any=400 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[str]=True , __lowerCamelCase : Union[str, Any]=[0.5, 0.5, 0.5] , __lowerCamelCase : Dict=[0.5, 0.5, 0.5] , __lowerCamelCase : List[Any]=None , ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = size if size is not None else {"shortest_edge": 18}
lowerCamelCase__ : int = crop_size if crop_size is not None else {"height": 18, "width": 18}
lowerCamelCase__ : Tuple = parent
lowerCamelCase__ : List[str] = batch_size
lowerCamelCase__ : Optional[Any] = num_channels
lowerCamelCase__ : str = num_frames
lowerCamelCase__ : List[Any] = image_size
lowerCamelCase__ : Optional[Any] = min_resolution
lowerCamelCase__ : Optional[int] = max_resolution
lowerCamelCase__ : Tuple = do_resize
lowerCamelCase__ : Union[str, Any] = size
lowerCamelCase__ : List[str] = do_normalize
lowerCamelCase__ : Tuple = image_mean
lowerCamelCase__ : List[str] = image_std
lowerCamelCase__ : List[Any] = crop_size
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _lowercase ( lowercase__ , unittest.TestCase):
"""simple docstring"""
A__ = VivitImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = VivitImageProcessingTester(self )
@property
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_center_crop" ) )
self.assertTrue(hasattr(__lowerCamelCase , "size" ) )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
lowerCamelCase__ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
lowerCamelCase__ : Any = prepare_video_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for video in video_inputs:
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
lowerCamelCase__ : Optional[Any] = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCamelCase__ : List[str] = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase__ : str = prepare_video_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for video in video_inputs:
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
lowerCamelCase__ : Any = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCamelCase__ : List[Any] = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ : List[Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for video in video_inputs:
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
lowerCamelCase__ : Any = image_processing(video_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowerCamelCase__ : Any = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
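# Hedged note on the three tests above: for PIL, numpy and torch inputs alike,
# the processor is expected to return a 5-D pixel_values tensor of shape
# (batch, num_frames, num_channels, crop_height, crop_width), e.g.
# (7, 10, 3, 18, 18) with this tester's defaults.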
| 184
|
def lowercase_ ( _A : int , _A : int ):
"""simple docstring"""
while a != 0:
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = b % a, a
return b
def lowercase_ ( _A : int , _A : int ):
"""simple docstring"""
if gcd(_A , _A ) != 1:
lowerCamelCase__ : List[str] = F"mod inverse of {a!r} and {m!r} does not exist"
raise ValueError(_A )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple = 1, 0, a
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = 0, 1, m
while va != 0:
lowerCamelCase__ : Tuple = ua // va
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
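# A minimal de-obfuscated sketch of the two helpers above, under my reading
# of the placeholder names (`gcd(a, b)` and `mod_inverse(a, m)` are labels I
# chose). It is the extended Euclidean algorithm, maintaining the invariant
# u3 == u1*a + u2*m (and likewise for the v-triple).
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b

def mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        raise ValueError(f"mod inverse of {a!r} and {m!r} does not exist")
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = u1 - q * v1, u2 - q * v2, u3 - q * v3, v1, v2, v3
    return u1 % m

assert mod_inverse(7, 26) == 15  # sanity check: 7 * 15 % 26 == 1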
| 184
| 1
|
import numpy as np
from PIL import Image
def a_ ( __lowercase : np.ndarray , __lowercase : int , __lowercase : int ) -> np.ndarray:
_snake_case = np.array(__lowercase )
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix' )
_snake_case = 0
_snake_case = 0
_snake_case = 0
_snake_case = 0
# compute the shape of the output matrix
_snake_case = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
_snake_case = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
_snake_case = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
_snake_case = 0
_snake_case = 0
return updated_arr
def a_ ( __lowercase : np.ndarray , __lowercase : int , __lowercase : int ) -> np.ndarray:
_snake_case = np.array(__lowercase )
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix' )
_snake_case = 0
_snake_case = 0
_snake_case = 0
_snake_case = 0
# compute the shape of the output matrix
_snake_case = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
_snake_case = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
_snake_case = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
_snake_case = 0
_snake_case = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
_lowerCamelCase : Dict = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
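# Hedged sanity check for the two pooling helpers above (the 4x4 input is my
# own example, not from the original; the keyword style mirrors the demo calls):
_demo = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
print(maxpooling(_demo, size=2, stride=2))  # [[ 6.  8.] [14. 16.]]
print(avgpooling(_demo, size=2, stride=2))  # [[ 3.  5.] [11. 13.]]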
| 363
|
from __future__ import annotations
_lowerCamelCase : Optional[Any] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
_lowerCamelCase : Optional[int] = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def a_ ( __lowercase : list[float] ) -> list[float]:
_snake_case = []
_snake_case = len(__lowercase )
for i in range(__lowercase ):
_snake_case = -1
for j in range(i + 1 , __lowercase ):
if arr[i] < arr[j]:
_snake_case = arr[j]
break
result.append(__lowercase )
return result
def a_ ( __lowercase : list[float] ) -> list[float]:
_snake_case = []
for i, outer in enumerate(__lowercase ):
_snake_case = -1
for inner in arr[i + 1 :]:
if outer < inner:
_snake_case = inner
break
result.append(__lowercase )
return result
def a_ ( __lowercase : list[float] ) -> list[float]:
_snake_case = len(__lowercase )
_snake_case = []
_snake_case = [-1] * arr_size
for index in reversed(range(__lowercase ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
_snake_case = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
_lowerCamelCase : Union[str, Any] = (
'''from __main__ import arr, next_greatest_element_slow, '''
'''next_greatest_element_fast, next_greatest_element'''
)
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
)
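# Hedged worked example (input chosen by the editor): every implementation
# above maps [2, 7, 3, 5, 4] to [7, -1, 5, -1, -1]; the stack-based variant
# just computes it in O(n) rather than O(n^2).
print(next_greatest_element([2, 7, 3, 5, 4]))  # -> [7, -1, 5, -1, -1]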
| 130
| 0
|
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class __A( a ):
snake_case_ = 42
snake_case_ = jnp.floataa
snake_case_ = True
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
super().setup()
__a = nn.Dense(5 , dtype=self.dtype )
def __call__( self , *_snake_case , **_snake_case ) -> str:
'''simple docstring'''
__a = super().__call__(*_snake_case , **_snake_case )
__a = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class __A( a ):
snake_case_ = FlaxBigBirdForNaturalQuestionsModule
def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ , a__ ) -> Dict:
def cross_entropy(a__ , a__ , a__=None ):
__a = logits.shape[-1]
__a = (labels[..., None] == jnp.arange(a__ )[None]).astype('''f4''' )
__a = jax.nn.log_softmax(a__ , axis=-1 )
__a = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
__a = reduction(a__ )
return loss
__a = partial(a__ , reduction=jnp.mean )
__a = cross_entropy(a__ , a__ )
__a = cross_entropy(a__ , a__ )
__a = cross_entropy(a__ , a__ )
return (start_loss + end_loss + pooled_loss) / 3
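# The loss above is the plain average of three cross-entropies (start span,
# end span, pooled category). Hedged toy check of one term: with two-way
# logits [0., 0.] and label 0, log_softmax gives [-log 2, -log 2], so the
# per-example loss is log 2, roughly 0.693.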
@dataclass
class __A:
snake_case_ = "google/bigbird-roberta-base"
snake_case_ = 3_0_0_0
snake_case_ = 1_0_5_0_0
snake_case_ = 1_2_8
snake_case_ = 3
snake_case_ = 1
snake_case_ = 5
# tx_args
snake_case_ = 3E-5
snake_case_ = 0.0
snake_case_ = 2_0_0_0_0
snake_case_ = 0.0_095
snake_case_ = "bigbird-roberta-natural-questions"
snake_case_ = "training-expt"
snake_case_ = "data/nq-training.jsonl"
snake_case_ = "data/nq-validation.jsonl"
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
os.makedirs(self.base_dir , exist_ok=_snake_case )
__a = os.path.join(self.base_dir , self.save_dir )
__a = self.batch_size_per_device * jax.device_count()
@dataclass
class __A:
snake_case_ = 42
snake_case_ = 4_0_9_6 # no dynamic padding on TPUs
def __call__( self , _snake_case ) -> int:
'''simple docstring'''
__a = self.collate_fn(_snake_case )
__a = jax.tree_util.tree_map(_snake_case , _snake_case )
return batch
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> int:
'''simple docstring'''
__a , __a = self.fetch_inputs(features['''input_ids'''] )
__a = {
'''input_ids''': jnp.array(_snake_case , dtype=jnp.intaa ),
'''attention_mask''': jnp.array(_snake_case , dtype=jnp.intaa ),
'''start_labels''': jnp.array(features['''start_token'''] , dtype=jnp.intaa ),
'''end_labels''': jnp.array(features['''end_token'''] , dtype=jnp.intaa ),
'''pooled_labels''': jnp.array(features['''category'''] , dtype=jnp.intaa ),
}
return batch
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Tuple:
'''simple docstring'''
__a = [self._fetch_inputs(_snake_case ) for ids in input_ids]
return zip(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> List[str]:
'''simple docstring'''
__a = [1 for _ in range(len(_snake_case ) )]
while len(_snake_case ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def __lowerCAmelCase ( a__ , a__ , a__=None ) -> List[str]:
if seed is not None:
__a = dataset.shuffle(seed=a__ )
for i in range(len(a__ ) // batch_size ):
__a = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(a__ )
@partial(jax.pmap , axis_name='''batch''' )
def __lowerCAmelCase ( a__ , a__ , **a__ ) -> Any:
def loss_fn(a__ ):
__a = model_inputs.pop('''start_labels''' )
__a = model_inputs.pop('''end_labels''' )
__a = model_inputs.pop('''pooled_labels''' )
__a = state.apply_fn(**a__ , params=a__ , dropout_rng=a__ , train=a__ )
__a , __a , __a = outputs
return state.loss_fn(
a__ , a__ , a__ , a__ , a__ , a__ , )
__a , __a = jax.random.split(a__ )
__a = jax.value_and_grad(a__ )
__a , __a = grad_fn(state.params )
__a = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
__a = jax.lax.pmean(a__ , '''batch''' )
__a = state.apply_gradients(grads=a__ )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name='''batch''' )
def __lowerCAmelCase ( a__ , **a__ ) -> str:
__a = model_inputs.pop('''start_labels''' )
__a = model_inputs.pop('''end_labels''' )
__a = model_inputs.pop('''pooled_labels''' )
__a = state.apply_fn(**a__ , params=state.params , train=a__ )
__a , __a , __a = outputs
__a = state.loss_fn(a__ , a__ , a__ , a__ , a__ , a__ )
__a = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
return metrics
class __A( train_state.TrainState ):
snake_case_ = struct.field(pytree_node=a )
@dataclass
class __A:
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = None
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case=None ) -> Union[str, Any]:
'''simple docstring'''
__a = model.params
__a = TrainState.create(
apply_fn=model.__call__ , params=_snake_case , tx=_snake_case , loss_fn=_snake_case , )
if ckpt_dir is not None:
__a , __a , __a , __a , __a = restore_checkpoint(_snake_case , _snake_case )
__a = {
'''lr''': args.lr,
'''init_lr''': args.init_lr,
'''warmup_steps''': args.warmup_steps,
'''num_train_steps''': num_train_steps,
'''weight_decay''': args.weight_decay,
}
__a , __a = build_tx(**_snake_case )
__a = train_state.TrainState(
step=_snake_case , apply_fn=model.__call__ , params=_snake_case , tx=_snake_case , opt_state=_snake_case , )
__a = args
__a = data_collator
__a = lr
__a = params
__a = jax_utils.replicate(_snake_case )
return state
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case ) -> Tuple:
'''simple docstring'''
__a = self.args
__a = len(_snake_case ) // args.batch_size
__a = jax.random.PRNGKey(0 )
__a = jax.random.split(_snake_case , jax.device_count() )
for epoch in range(args.max_epochs ):
__a = jnp.array(0 , dtype=jnp.floataa )
__a = get_batched_dataset(_snake_case , args.batch_size , seed=_snake_case )
__a = 0
for batch in tqdm(_snake_case , total=_snake_case , desc=F"""Running EPOCH-{epoch}""" ):
__a = self.data_collator(_snake_case )
__a , __a , __a = self.train_step_fn(_snake_case , _snake_case , **_snake_case )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
if i % args.logging_steps == 0:
__a = jax_utils.unreplicate(state.step )
__a = running_loss.item() / i
__a = self.scheduler_fn(state_step - 1 )
__a = self.evaluate(_snake_case , _snake_case )
__a = {
'''step''': state_step.item(),
'''eval_loss''': eval_loss.item(),
'''tr_loss''': tr_loss,
'''lr''': lr.item(),
}
tqdm.write(str(_snake_case ) )
self.logger.log(_snake_case , commit=_snake_case )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F"""-e{epoch}-s{i}""" , state=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> Optional[int]:
'''simple docstring'''
__a = get_batched_dataset(_snake_case , self.args.batch_size )
__a = len(_snake_case ) // self.args.batch_size
__a = jnp.array(0 , dtype=jnp.floataa )
__a = 0
for batch in tqdm(_snake_case , total=_snake_case , desc='''Evaluating ... ''' ):
__a = self.data_collator(_snake_case )
__a = self.val_step_fn(_snake_case , **_snake_case )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
return running_loss / i
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case ) -> List[str]:
'''simple docstring'''
__a = jax_utils.unreplicate(_snake_case )
print(F"""SAVING CHECKPOINT IN {save_dir}""" , end=''' ... ''' )
self.model_save_fn(_snake_case , params=state.params )
with open(os.path.join(_snake_case , '''opt_state.msgpack''' ) , '''wb''' ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(_snake_case , '''args.joblib''' ) )
joblib.dump(self.data_collator , os.path.join(_snake_case , '''data_collator.joblib''' ) )
with open(os.path.join(_snake_case , '''training_state.json''' ) , '''w''' ) as f:
json.dump({'''step''': state.step.item()} , _snake_case )
print('''DONE''' )
def __lowerCAmelCase ( a__ , a__ ) -> List[Any]:
print(F"""RESTORING CHECKPOINT FROM {save_dir}""" , end=''' ... ''' )
with open(os.path.join(a__ , '''flax_model.msgpack''' ) , '''rb''' ) as f:
__a = from_bytes(state.params , f.read() )
with open(os.path.join(a__ , '''opt_state.msgpack''' ) , '''rb''' ) as f:
__a = from_bytes(state.opt_state , f.read() )
__a = joblib.load(os.path.join(a__ , '''args.joblib''' ) )
__a = joblib.load(os.path.join(a__ , '''data_collator.joblib''' ) )
with open(os.path.join(a__ , '''training_state.json''' ) , '''r''' ) as f:
__a = json.load(a__ )
__a = training_state['''step''']
print('''DONE''' )
return params, opt_state, step, args, data_collator
def __lowerCAmelCase ( a__ , a__ , a__ , a__ ) -> Optional[Any]:
__a = num_train_steps - warmup_steps
__a = optax.linear_schedule(init_value=a__ , end_value=a__ , transition_steps=a__ )
__a = optax.linear_schedule(init_value=a__ , end_value=1e-7 , transition_steps=a__ )
__a = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
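# Hedged numeric check of the schedule above (toy settings of my choosing):
# with init_lr=0.0, lr=3e-5, warmup_steps=2 and num_train_steps=6, the joined
# schedule reads 0.0 at step 0, 3e-5 at step 2 (end of warmup), then decays
# linearly over the remaining 4 steps to about 1e-7 at step 6.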
def __lowerCAmelCase ( a__ , a__ , a__ , a__ , a__ ) -> str:
def weight_decay_mask(a__ ):
__a = traverse_util.flatten_dict(a__ )
__a = {k: (v[-1] != '''bias''' and v[-2:] != ('''LayerNorm''', '''scale''')) for k, v in params.items()}
return traverse_util.unflatten_dict(a__ )
__a = scheduler_fn(a__ , a__ , a__ , a__ )
__a = optax.adamw(learning_rate=a__ , weight_decay=a__ , mask=a__ )
return tx, lr
| 6
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class _lowercase ( unittest.TestCase):
"""simple docstring"""
def __init__( self : Any , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any]=7 , __lowerCamelCase : Dict=3 , __lowerCamelCase : Union[str, Any]=30 , __lowerCamelCase : Union[str, Any]=400 , __lowerCamelCase : Dict=True , __lowerCamelCase : Tuple=None , __lowerCamelCase : str=True , __lowerCamelCase : List[str]=[0.5, 0.5, 0.5] , __lowerCamelCase : str=[0.5, 0.5, 0.5] , __lowerCamelCase : Any=True , __lowerCamelCase : Union[str, Any]=1 / 255 , __lowerCamelCase : Optional[Any]=True , ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : Optional[int] = batch_size
lowerCamelCase__ : str = num_channels
lowerCamelCase__ : Optional[Any] = min_resolution
lowerCamelCase__ : List[Any] = max_resolution
lowerCamelCase__ : int = do_resize
lowerCamelCase__ : Union[str, Any] = size
lowerCamelCase__ : Union[str, Any] = do_normalize
lowerCamelCase__ : int = image_mean
lowerCamelCase__ : Optional[int] = image_std
lowerCamelCase__ : List[Any] = do_rescale
lowerCamelCase__ : Optional[Any] = rescale_factor
lowerCamelCase__ : Any = do_pad
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : int , __lowerCamelCase : List[str]=False ):
'''simple docstring'''
if not batched:
lowerCamelCase__ : Tuple = image_inputs[0]
if isinstance(__lowerCamelCase , Image.Image ):
lowerCamelCase__ , lowerCamelCase__ : Tuple = image.size
else:
lowerCamelCase__ , lowerCamelCase__ : List[str] = image.shape[1], image.shape[2]
if w < h:
lowerCamelCase__ : List[Any] = int(self.size["shortest_edge"] * h / w )
lowerCamelCase__ : Optional[Any] = self.size["shortest_edge"]
elif w > h:
lowerCamelCase__ : List[Any] = self.size["shortest_edge"]
lowerCamelCase__ : List[str] = int(self.size["shortest_edge"] * w / h )
else:
lowerCamelCase__ : Optional[int] = self.size["shortest_edge"]
lowerCamelCase__ : Union[str, Any] = self.size["shortest_edge"]
else:
lowerCamelCase__ : Dict = []
for image in image_inputs:
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCamelCase__ : str = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0]
lowerCamelCase__ : Optional[Any] = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1]
return expected_height, expected_width
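# Hedged numeric example of the shortest-edge logic above: with
# size={"shortest_edge": 18}, a 30(h) x 40(w) image takes the w > h branch,
# so expected_height = 18 and expected_width = int(18 * 40 / 30) = 24.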
@require_torch
@require_vision
class _lowercase ( lowercase__ , unittest.TestCase):
"""simple docstring"""
A__ = DetaImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ : str = DetaImageProcessingTester(self )
@property
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_pad" ) )
self.assertTrue(hasattr(__lowerCamelCase , "size" ) )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
pass
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
lowerCamelCase__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase__ , lowerCamelCase__ : List[str] = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
lowerCamelCase__ : List[Any] = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
lowerCamelCase__ : Any = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : Any = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase__ : Optional[Any] = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
lowerCamelCase__ : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : Any = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase__ : Union[str, Any] = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : Dict = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
lowerCamelCase__ : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
lowerCamelCase__ : Dict = json.loads(f.read() )
lowerCamelCase__ : Any = {"image_id": 39769, "annotations": target}
# encode them
lowerCamelCase__ : Union[str, Any] = DetaImageProcessor()
lowerCamelCase__ : List[str] = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
lowerCamelCase__ : List[str] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1E-4 ) )
# verify area
lowerCamelCase__ : Tuple = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
lowerCamelCase__ : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
lowerCamelCase__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1E-3 ) )
# verify image_id
lowerCamelCase__ : Optional[Any] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
lowerCamelCase__ : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
lowerCamelCase__ : int = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify orig_size
lowerCamelCase__ : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
lowerCamelCase__ : Optional[int] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
@slow
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
lowerCamelCase__ : Tuple = json.loads(f.read() )
lowerCamelCase__ : List[str] = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
lowerCamelCase__ : Union[str, Any] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
lowerCamelCase__ : Tuple = DetaImageProcessor(format="coco_panoptic" )
lowerCamelCase__ : Dict = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
lowerCamelCase__ : List[Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
lowerCamelCase__ : Dict = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1E-4 ) )
# verify area
lowerCamelCase__ : List[Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
lowerCamelCase__ : Any = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
lowerCamelCase__ : int = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1E-3 ) )
# verify image_id
lowerCamelCase__ : Optional[int] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
lowerCamelCase__ : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
lowerCamelCase__ : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify masks
lowerCamelCase__ : Union[str, Any] = 822873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __lowerCamelCase )
# verify orig_size
lowerCamelCase__ : Optional[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
lowerCamelCase__ : Union[str, Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
| 184
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A : Optional[int] = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[str] = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : str = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
__A : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 361
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : Optional[int] = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
__A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 326
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Dict =logging.get_logger(__name__)
A__ : Dict ={
'''SCUT-DLVCLab/lilt-roberta-en-base''': (
'''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'''
),
}
class UpperCAmelCase ( snake_case_ ):
_lowercase: List[str] = '''lilt'''
def __init__( self : str , __snake_case : Any=3_05_22 , __snake_case : str=7_68 , __snake_case : Any=12 , __snake_case : List[str]=12 , __snake_case : Union[str, Any]=30_72 , __snake_case : Tuple="gelu" , __snake_case : List[str]=0.1 , __snake_case : Union[str, Any]=0.1 , __snake_case : Dict=5_12 , __snake_case : Optional[int]=2 , __snake_case : int=0.02 , __snake_case : Any=1E-1_2 , __snake_case : Optional[int]=0 , __snake_case : List[str]="absolute" , __snake_case : Dict=None , __snake_case : List[str]=4 , __snake_case : List[Any]=10_24 , **__snake_case : List[Any] , ) -> List[str]:
super().__init__(pad_token_id=__snake_case , **__snake_case )
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = hidden_act
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = position_embedding_type
_lowerCAmelCase = classifier_dropout
_lowerCAmelCase = channel_shrink_ratio
_lowerCAmelCase = max_ad_position_embeddings
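# Hedged usage sketch (the class above is the LiLT config; the field value
# below is illustrative, not a recommendation):
#   config = LiltConfig(channel_shrink_ratio=4)
#   config.model_type  # "lilt"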
| 70
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase : Any = {'vocab_file': 'spiece.model'}
lowerCAmelCase : Tuple = {
'vocab_file': {
'bert_for_seq_generation': (
'https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'
),
}
}
lowerCAmelCase : Optional[int] = {'bert_for_seq_generation': 5_12}
class _A ( __magic_name__):
SCREAMING_SNAKE_CASE : Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : List[int] = []
SCREAMING_SNAKE_CASE : Dict = ['''input_ids''', '''attention_mask''']
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<::::>" , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
# Register the special tokens with the base tokenizer class
super().__init__(
bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **_SCREAMING_SNAKE_CASE , )
SCREAMING_SNAKE_CASE_ : List[str] = vocab_file
SCREAMING_SNAKE_CASE_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_SCREAMING_SNAKE_CASE )
@property
def UpperCAmelCase ( self ):
"""simple docstring"""
return self.sp_model.get_piece_size()
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ : List[Any] = None
return state
def __setstate__( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
SCREAMING_SNAKE_CASE_ : Dict = {}
SCREAMING_SNAKE_CASE_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.sp_model.piece_to_id(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.sp_model.IdToPiece(_SCREAMING_SNAKE_CASE )
return token
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
SCREAMING_SNAKE_CASE_ : Optional[int] = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE ) + token
SCREAMING_SNAKE_CASE_ : Optional[int] = []
else:
current_sub_tokens.append(_SCREAMING_SNAKE_CASE )
out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE )
return out_string.strip()
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(_SCREAMING_SNAKE_CASE , 'wb' ) as fi:
SCREAMING_SNAKE_CASE_ : List[Any] = self.sp_model.serialized_model_proto()
fi.write(_SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
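# Hedged usage sketch (checkpoint id taken from the vocab-file URL above;
# the public class name is assumed):
#   tok = BertGenerationTokenizer.from_pretrained(
#       "google/bert_for_seq_generation_L-24_bbc_encoder"
#   )
#   ids = tok("hello world").input_ids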
| 253
| 0
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ = logging.get_logger(__name__)
snake_case_ = 'https://openaipublic.azureedge.net/jukebox/models/'
snake_case_ = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def lowerCamelCase__ ( snake_case_ : Union[str, Any] ) -> List[str]:
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
__snake_case = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
__snake_case = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
__snake_case = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
__snake_case = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
__snake_case = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
__snake_case = key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
__snake_case = key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
__snake_case = key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
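# Hedged illustrations of the renaming above (keys are made up to exercise
# single branches, not taken from a real checkpoint):
#   "bottleneck.level_blocks.0.k"        -> "bottleneck.level_blocks.0.codebook"
#   "priors.0.y_emb.weight"              -> "priors.0.metadata_embedding.weight"
#   "priors.0.prime_state_proj.weight"   -> "priors.0.encoder.proj_in.weight"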
def lowerCamelCase__ ( snake_case_ : str , snake_case_ : Optional[Any] , snake_case_ : int , snake_case_ : List[str] ) -> Optional[Any]:
__snake_case = {}
import re
__snake_case = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__snake_case = re.compile(
R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__snake_case = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__snake_case = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__snake_case = re.compile(
R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__snake_case = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__snake_case = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
__snake_case = re.compile(
R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__snake_case = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(snake_case_ ):
__snake_case = re_encoder_block_conv_in.match(snake_case_ )
__snake_case = regex_match.groups()
__snake_case = int(groups[2] ) * 2 + int(groups[3] )
__snake_case = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
__snake_case = re_encoder_block_conv_in.sub(snake_case_ , snake_case_ )
elif re_encoder_block_resnet.fullmatch(snake_case_ ):
__snake_case = re_encoder_block_resnet.match(snake_case_ )
__snake_case = regex_match.groups()
__snake_case = int(groups[2] ) * 2 + int(groups[3] )
__snake_case = {'''1''': 1, '''3''': 2}[groups[-2]]
__snake_case = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
__snake_case = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
__snake_case = prefix + resnet_block
__snake_case = re_encoder_block_resnet.sub(snake_case_ , snake_case_ )
elif re_encoder_block_proj_out.fullmatch(snake_case_ ):
__snake_case = re_encoder_block_proj_out.match(snake_case_ )
__snake_case = regex_match.groups()
__snake_case = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
__snake_case = re_encoder_block_proj_out.sub(snake_case_ , snake_case_ )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(snake_case_ ):
__snake_case = re_decoder_block_conv_out.match(snake_case_ )
__snake_case = regex_match.groups()
__snake_case = int(groups[2] ) * 2 + int(groups[3] ) - 2
__snake_case = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
__snake_case = re_decoder_block_conv_out.sub(snake_case_ , snake_case_ )
elif re_decoder_block_resnet.fullmatch(snake_case_ ):
__snake_case = re_decoder_block_resnet.match(snake_case_ )
__snake_case = regex_match.groups()
__snake_case = int(groups[2] ) * 2 + int(groups[3] ) - 2
__snake_case = {'''1''': 1, '''3''': 2}[groups[-2]]
__snake_case = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
__snake_case = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
__snake_case = prefix + resnet_block
__snake_case = re_decoder_block_resnet.sub(snake_case_ , snake_case_ )
elif re_decoder_block_proj_in.fullmatch(snake_case_ ):
__snake_case = re_decoder_block_proj_in.match(snake_case_ )
__snake_case = regex_match.groups()
__snake_case = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
__snake_case = re_decoder_block_proj_in.sub(snake_case_ , snake_case_ )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(snake_case_ ):
__snake_case = re_prior_cond_conv_out.match(snake_case_ )
__snake_case = regex_match.groups()
__snake_case = int(groups[1] ) * 2 + int(groups[2] ) - 2
__snake_case = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
__snake_case = re_prior_cond_conv_out.sub(snake_case_ , snake_case_ )
elif re_prior_cond_resnet.fullmatch(snake_case_ ):
__snake_case = re_prior_cond_resnet.match(snake_case_ )
__snake_case = regex_match.groups()
__snake_case = int(groups[1] ) * 2 + int(groups[2] ) - 2
__snake_case = {'''1''': 1, '''3''': 2}[groups[-2]]
__snake_case = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
__snake_case = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
__snake_case = prefix + resnet_block
__snake_case = re_prior_cond_resnet.sub(snake_case_ , snake_case_ )
elif re_prior_cond_proj_in.fullmatch(snake_case_ ):
__snake_case = re_prior_cond_proj_in.match(snake_case_ )
__snake_case = regex_match.groups()
__snake_case = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
__snake_case = re_prior_cond_proj_in.sub(snake_case_ , snake_case_ )
# keep original key
else:
__snake_case = original_key
__snake_case = replace_key(snake_case_ )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(f"""failed converting {original_key} to {key}, does not match""" )
# handle mismatched shape
elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
__snake_case = model_state_dict[f"""{key_prefix}.{key}"""]
print(f"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
__snake_case = original_key
__snake_case = original_key
__snake_case = value
return new_dict
@torch.no_grad()
def lowerCamelCase__ ( snake_case_ : str=None , snake_case_ : Tuple=None ) -> Any:
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" ):
__snake_case = requests.get(f"""{PREFIX}{file}""" , allow_redirects=snake_case_ )
os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=snake_case_ )
open(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" , '''wb''' ).write(r.content )
__snake_case = MODEL_MAPPING[model_name.split('''/''' )[-1]]
__snake_case = JukeboxConfig.from_pretrained(snake_case_ )
__snake_case = JukeboxModel(snake_case_ )
__snake_case = []
__snake_case = {}
for i, dict_name in enumerate(snake_case_ ):
__snake_case = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}""" )['''model''']
__snake_case = {}
for k in old_dic.keys():
if k.endswith('''.b''' ):
__snake_case = old_dic[k]
elif k.endswith('''.w''' ):
__snake_case = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
__snake_case = old_dic[k]
else:
__snake_case = old_dic[k]
__snake_case = '''vqvae''' if i == 0 else f"""priors.{3 - i}"""
__snake_case = fix_jukebox_keys(snake_case_ , model.state_dict() , snake_case_ , snake_case_ )
weight_dict.append(snake_case_ )
__snake_case = weight_dict.pop(0 )
model.vqvae.load_state_dict(snake_case_ )
for i in range(len(snake_case_ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
with open(f"""{pytorch_dump_folder_path}/mapping.json""" , '''w''' ) as txtfile:
json.dump(snake_case_ , snake_case_ )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case_ )
return weight_dict
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
snake_case_ = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 238
|
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def lowerCamelCase__ ( snake_case_ : str ) -> str:
return "".join(sorted(snake_case_ ) )
def lowerCamelCase__ ( snake_case_ : str ) -> list[str]:
return word_by_signature[signature(snake_case_ )]
snake_case_ = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
snake_case_ = sorted({word.strip().lower() for word in data.splitlines()})
snake_case_ = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
snake_case_ = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams))
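# A tiny in-memory sketch of the signature-based grouping above: two words are
# anagrams exactly when their sorted characters match, so the sorted string is
# a canonical dictionary key. The word list is a stand-in.
import collections

toy_words = ["listen", "silent", "enlist", "banana"]
toy_groups = collections.defaultdict(list)
for toy_word in toy_words:
    toy_groups["".join(sorted(toy_word))].append(toy_word)
# toy_groups["eilnst"] == ["listen", "silent", "enlist"]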
| 238
| 1
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCamelCase : Any = logging.getLogger(__name__)
def _SCREAMING_SNAKE_CASE ( lowercase : List[str] , lowercase : List[Any] ):
'''simple docstring'''
return (preds == labels).mean()
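# The metric above is a plain element-wise mean over predictions; a small
# worked example with stand-in arrays:
import numpy as np

demo_preds = np.array([0, 1, 2, 1])
demo_labels = np.array([0, 1, 1, 1])
demo_accuracy = (demo_preds == demo_labels).mean()  # 3 of 4 match -> 0.75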
@dataclass
class A:
'''simple docstring'''
UpperCamelCase = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCamelCase = field(
default=UpperCamelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=UpperCamelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=UpperCamelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class A:
'''simple docstring'''
UpperCamelCase = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(processors.keys() )} )
UpperCamelCase = field(metadata={'''help''': '''Should contain the data files for the task.'''} )
UpperCamelCase = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCamelCase = field(
default=UpperCamelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
    # Set the verbosity of the Transformers logger to info (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , lowercase )
# Set seed
set_seed(training_args.seed )
try:
lowerCamelCase_ = processors[data_args.task_name]()
lowerCamelCase_ = processor.get_labels()
lowerCamelCase_ = len(lowercase )
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowercase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
lowerCamelCase_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowerCamelCase_ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowercase , cache_dir=model_args.cache_dir , )
# Get datasets
lowerCamelCase_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowercase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
lowerCamelCase_ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=lowercase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(lowercase : EvalPrediction ) -> Dict:
lowerCamelCase_ = np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(lowercase , p.label_ids )}
# Data collator
lowerCamelCase_ = DataCollatorWithPadding(lowercase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
lowerCamelCase_ = Trainer(
model=lowercase , args=lowercase , train_dataset=lowercase , eval_dataset=lowercase , compute_metrics=lowercase , data_collator=lowercase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowerCamelCase_ = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
lowerCamelCase_ = trainer.evaluate()
lowerCamelCase_ = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_master():
with open(lowercase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , lowercase , lowercase )
writer.write('%s = %s\n' % (key, value) )
results.update(lowercase )
return results
def _SCREAMING_SNAKE_CASE ( lowercase : str ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 204
|
import argparse
import os
import re
# All paths are set with the intent that you should run this script from the root of the repo with the command
# python utils/check_dummies.py
lowerCamelCase : Union[str, Any] = "src/diffusers"
# Matches is_xxx_available()
lowerCamelCase : Dict = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
lowerCamelCase : Union[str, Any] = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
lowerCamelCase : Any = "\n{0} = None\n"
lowerCamelCase : List[str] = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n"
lowerCamelCase : str = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] ):
'''simple docstring'''
lowerCamelCase_ = _re_backend.findall(lowercase )
if len(lowercase ) == 0:
return None
return "_and_".join(lowercase )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
with open(os.path.join(lowercase , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowerCamelCase_ = f.readlines()
    # Get to the point where we do the actual imports for type checking
lowerCamelCase_ = 0
lowerCamelCase_ = {}
    # Go through to the end of the file
while line_index < len(lowercase ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
lowerCamelCase_ = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('else:' ):
line_index += 1
line_index += 1
lowerCamelCase_ = []
# Until we unindent, add backend objects to the list
while line_index < len(lowercase ) and len(lines[line_index] ) > 1:
lowerCamelCase_ = lines[line_index]
lowerCamelCase_ = _re_single_line_import.search(lowercase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(lowercase ) > 0:
lowerCamelCase_ = objects
else:
line_index += 1
return backend_specific_objects
def _SCREAMING_SNAKE_CASE ( lowercase : List[str] , lowercase : str ):
'''simple docstring'''
if name.isupper():
return DUMMY_CONSTANT.format(lowercase )
elif name.islower():
return DUMMY_FUNCTION.format(lowercase , lowercase )
else:
return DUMMY_CLASS.format(lowercase , lowercase )
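# create_dummy_object dispatches on the casing of the name: UPPERCASE names
# become None constants, lowercase names become stub functions, and anything
# else becomes a DummyObject class. A self-contained look at the function
# template (same shape as above, stand-in names):
demo_function_template = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"
print(demo_function_template.format("load_model", '["torch"]'))
# def load_model(*args, **kwargs):
#     requires_backends(load_model, ["torch"])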
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[int]=None ):
'''simple docstring'''
if backend_specific_objects is None:
lowerCamelCase_ = read_init()
    # Special correspondence from backend to module name, as used in the function requires_modulename
lowerCamelCase_ = {}
for backend, objects in backend_specific_objects.items():
lowerCamelCase_ = '[' + ', '.join(f"""\"{b}\"""" for b in backend.split('_and_' ) ) + ']'
lowerCamelCase_ = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(lowercase , lowercase ) for o in objects] )
lowerCamelCase_ = dummy_file
return dummy_files
def _SCREAMING_SNAKE_CASE ( lowercase : Optional[int]=False ):
'''simple docstring'''
lowerCamelCase_ = create_dummy_files()
    # Special correspondence from backend to the shortcut used in utils/dummy_xxx_objects.py
lowerCamelCase_ = {'torch': 'pt'}
# Locate actual dummy modules and read their content.
lowerCamelCase_ = os.path.join(lowercase , 'utils' )
lowerCamelCase_ = {
backend: os.path.join(lowercase , f"""dummy_{short_names.get(lowercase , lowercase )}_objects.py""" )
for backend in dummy_files.keys()
}
lowerCamelCase_ = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(lowercase ):
with open(lowercase , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowerCamelCase_ = f.read()
else:
lowerCamelCase_ = ''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
f"""Updating diffusers.utils.dummy_{short_names.get(lowercase , lowercase )}_objects.py as the main """
'__init__ has new objects.' )
with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'The main __init__ has objects that are not present in '
f"""diffusers.utils.dummy_{short_names.get(lowercase , lowercase )}_objects.py. Run `make fix-copies` """
'to fix this.' )
if __name__ == "__main__":
lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowerCamelCase : Tuple = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 204
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase_ : Dict = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class a__ ( __snake_case ):
A__ : Optional[Any] = 'git_vision_model'
def __init__( self , UpperCAmelCase=7_6_8 , UpperCAmelCase=3_0_7_2 , UpperCAmelCase=1_2 , UpperCAmelCase=1_2 , UpperCAmelCase=3 , UpperCAmelCase=2_2_4 , UpperCAmelCase=1_6 , UpperCAmelCase="quick_gelu" , UpperCAmelCase=1e-5 , UpperCAmelCase=0.0 , UpperCAmelCase=0.02 , **UpperCAmelCase , ) -> List[str]:
super().__init__(**UpperCAmelCase )
__a = hidden_size
__a = intermediate_size
__a = num_hidden_layers
__a = num_attention_heads
__a = num_channels
__a = patch_size
__a = image_size
__a = initializer_range
__a = attention_dropout
__a = layer_norm_eps
__a = hidden_act
@classmethod
def __SCREAMING_SNAKE_CASE ( cls , UpperCAmelCase , **UpperCAmelCase ) -> "PretrainedConfig":
cls._set_token_in_kwargs(UpperCAmelCase )
__a , __a = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase )
# get the vision config dict if we are loading from GITConfig
if config_dict.get('model_type' ) == "git":
__a = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCAmelCase , **UpperCAmelCase )
class a__ ( __snake_case ):
A__ : Dict = 'git'
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=3_0_5_2_2 , UpperCAmelCase=7_6_8 , UpperCAmelCase=6 , UpperCAmelCase=1_2 , UpperCAmelCase=3_0_7_2 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=1_0_2_4 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-12 , UpperCAmelCase=0 , UpperCAmelCase="absolute" , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=1_0_1 , UpperCAmelCase=1_0_2 , UpperCAmelCase=None , **UpperCAmelCase , ) -> Any:
super().__init__(bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , pad_token_id=UpperCAmelCase , **UpperCAmelCase )
if vision_config is None:
__a = {}
logger.info('vision_config is None. initializing the GitVisionConfig with default values.' )
__a = GitVisionConfig(**UpperCAmelCase )
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = initializer_range
__a = layer_norm_eps
__a = position_embedding_type
__a = use_cache
__a = tie_word_embeddings
__a = num_image_with_embedding
__a = bos_token_id
__a = eos_token_id
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
__a = copy.deepcopy(self.__dict__ )
__a = self.vision_config.to_dict()
__a = self.__class__.model_type
return output
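# A minimal sketch of the serialization pattern in to_dict above: the nested
# vision config is flattened to a plain dict so the whole config round-trips
# through JSON. ToyVision/ToyConfig are stand-ins, not the real classes.
import copy

class ToyVision:
    patch_size = 16

    def to_dict(self):
        return {"patch_size": self.patch_size}

class ToyConfig:
    def __init__(self):
        self.hidden_size = 768
        self.vision_config = ToyVision()

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        return output

# ToyConfig().to_dict() == {"hidden_size": 768, "vision_config": {"patch_size": 16}}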
| 197
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCamelCase_ : Dict = logging.get_logger(__name__)
@add_end_docstrings(__snake_case )
class a__ ( __snake_case ):
def __init__( self , **UpperCAmelCase ) -> List[str]:
super().__init__(**UpperCAmelCase )
if self.framework == "tf":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , 'vision' )
self.check_model_type(UpperCAmelCase )
def __call__( self , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> Tuple:
if "text_queries" in kwargs:
__a = kwargs.pop('text_queries' )
if isinstance(UpperCAmelCase , (str, Image.Image) ):
__a = {'image': image, 'candidate_labels': candidate_labels}
else:
__a = image
__a = super().__call__(UpperCAmelCase , **UpperCAmelCase )
return results
def __SCREAMING_SNAKE_CASE ( self , **UpperCAmelCase ) -> List[str]:
__a = {}
if "threshold" in kwargs:
__a = kwargs['threshold']
if "top_k" in kwargs:
__a = kwargs['top_k']
return {}, {}, postprocess_params
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> Union[str, Any]:
__a = load_image(inputs['image'] )
__a = inputs['candidate_labels']
if isinstance(UpperCAmelCase , UpperCAmelCase ):
__a = candidate_labels.split(',' )
__a = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(UpperCAmelCase ):
__a = self.tokenizer(UpperCAmelCase , return_tensors=self.framework )
__a = self.image_processor(UpperCAmelCase , return_tensors=self.framework )
yield {
"is_last": i == len(UpperCAmelCase ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> str:
__a = model_inputs.pop('target_size' )
__a = model_inputs.pop('candidate_label' )
__a = model_inputs.pop('is_last' )
__a = self.model(**UpperCAmelCase )
__a = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
return model_outputs
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase=0.1 , UpperCAmelCase=None ) -> Tuple:
__a = []
for model_output in model_outputs:
__a = model_output['candidate_label']
__a = BaseModelOutput(UpperCAmelCase )
__a = self.image_processor.post_process_object_detection(
outputs=UpperCAmelCase , threshold=UpperCAmelCase , target_sizes=model_output['target_size'] )[0]
for index in outputs["scores"].nonzero():
__a = outputs['scores'][index].item()
__a = self._get_bounding_box(outputs['boxes'][index][0] )
__a = {'score': score, 'label': label, 'box': box}
results.append(UpperCAmelCase )
__a = sorted(UpperCAmelCase , key=lambda UpperCAmelCase : x["score"] , reverse=UpperCAmelCase )
if top_k:
__a = results[:top_k]
return results
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.' )
__a , __a , __a , __a = box.int().tolist()
__a = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
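# An assumed end-to-end call of the pipeline above; "google/owlvit-base-patch32"
# is one public checkpoint known to support this task and is used purely as an
# illustration (running this downloads the weights).
from transformers import pipeline

demo_detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
demo_predictions = demo_detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
    threshold=0.1,
)
# Each prediction is {"score": float, "label": str, "box": {"xmin", "ymin", "xmax", "ymax"}}.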
| 197
| 1
|
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[Any]:
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]:
return max(metric_fn(lowerCamelCase__ , lowerCamelCase__ ) for gt in ground_truths )
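# A worked example of the max-over-references scoring above, with exact match
# as a stand-in metric:
def demo_exact_match(prediction, ground_truth):
    return float(prediction.strip().lower() == ground_truth.strip().lower())

demo_best = max(demo_exact_match("Paris", gt) for gt in ["paris", "the city of Paris"])
# demo_best == 1.0: one reference matches exactly, so the prediction scores full marks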
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : int ) -> Optional[int]:
__lowercase = [line.strip() for line in open(lowerCamelCase__ , 'r' ).readlines()]
__lowercase = []
if args.gold_data_mode == "qa":
__lowercase = pd.read_csv(lowerCamelCase__ , sep='\t' , header=lowerCamelCase__ )
for answer_list in data[1]:
__lowercase = ast.literal_eval(lowerCamelCase__ )
answers.append(lowerCamelCase__ )
else:
__lowercase = [line.strip() for line in open(lowerCamelCase__ , 'r' ).readlines()]
__lowercase = [[reference] for reference in references]
__lowercase = 0
for prediction, ground_truths in zip(lowerCamelCase__ , lowerCamelCase__ ):
total += 1
em += metric_max_over_ground_truths(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
fa += metric_max_over_ground_truths(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
__lowercase = 100.0 * em / total
__lowercase = 100.0 * fa / total
logger.info(F"""F1: {fa:.2f}""" )
logger.info(F"""EM: {em:.2f}""" )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any ) -> Optional[int]:
__lowercase = args.k
__lowercase = [line.strip() for line in open(lowerCamelCase__ , 'r' ).readlines()]
__lowercase = [line.strip() for line in open(lowerCamelCase__ , 'r' ).readlines()]
__lowercase = 0
for hypo, reference in zip(lowerCamelCase__ , lowerCamelCase__ ):
__lowercase = set(hypo.split('\t' )[:k] )
__lowercase = set(reference.split('\t' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
__lowercase = 100.0 * em / total
logger.info(F"""Precision@{k}: {em: .2f}""" )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple ) -> Optional[Any]:
def strip_title(SCREAMING_SNAKE_CASE : Union[str, Any] ):
if title.startswith('\"' ):
__lowercase = title[1:]
if title.endswith('\"' ):
__lowercase = title[:-1]
return title
__lowercase = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
lowerCamelCase__ , return_tensors='pt' , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , )["input_ids"].to(args.device )
__lowercase = rag_model.rag.question_encoder(lowerCamelCase__ )
__lowercase = question_enc_outputs[0]
__lowercase = rag_model.retriever(
lowerCamelCase__ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='pt' , )
__lowercase = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
__lowercase = []
for docs in all_docs:
__lowercase = [strip_title(lowerCamelCase__ ) for title in docs["title"]]
provenance_strings.append('\t'.join(lowerCamelCase__ ) )
return provenance_strings
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Tuple ) -> Optional[Any]:
with torch.no_grad():
__lowercase = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
lowerCamelCase__ , return_tensors='pt' , padding=lowerCamelCase__ , truncation=lowerCamelCase__ )
__lowercase = inputs_dict.input_ids.to(args.device )
__lowercase = inputs_dict.attention_mask.to(args.device )
__lowercase = rag_model.generate( # rag_model overwrites generate
lowerCamelCase__ , attention_mask=lowerCamelCase__ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=lowerCamelCase__ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
__lowercase = rag_model.retriever.generator_tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
if args.print_predictions:
for q, a in zip(lowerCamelCase__ , lowerCamelCase__ ):
logger.info('Q: {} - A: {}'.format(lowerCamelCase__ , lowerCamelCase__ ) )
return answers
def __SCREAMING_SNAKE_CASE ( ) -> int:
__lowercase = argparse.ArgumentParser()
parser.add_argument(
'--model_type' , choices=['rag_sequence', 'rag_token', 'bart'] , type=lowerCamelCase__ , help=(
            'RAG model type: rag_sequence, rag_token or bart; if none is specified, the type is inferred'
            ' from model_name_or_path'
) , )
parser.add_argument(
'--index_name' , default=lowerCamelCase__ , choices=['exact', 'compressed', 'legacy'] , type=lowerCamelCase__ , help='RAG model retriever type' , )
parser.add_argument(
'--index_path' , default=lowerCamelCase__ , type=lowerCamelCase__ , help='Path to the retrieval index' , )
parser.add_argument('--n_docs' , default=5 , type=lowerCamelCase__ , help='Number of retrieved docs' )
parser.add_argument(
'--model_name_or_path' , default=lowerCamelCase__ , type=lowerCamelCase__ , required=lowerCamelCase__ , help='Path to pretrained checkpoints or model identifier from huggingface.co/models' , )
parser.add_argument(
'--eval_mode' , choices=['e2e', 'retrieval'] , default='e2e' , type=lowerCamelCase__ , help=(
'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'
' precision@k.'
) , )
parser.add_argument('--k' , default=1 , type=lowerCamelCase__ , help='k for the precision@k calculation' )
parser.add_argument(
'--evaluation_set' , default=lowerCamelCase__ , type=lowerCamelCase__ , required=lowerCamelCase__ , help='Path to a file containing evaluation samples' , )
parser.add_argument(
'--gold_data_path' , default=lowerCamelCase__ , type=lowerCamelCase__ , required=lowerCamelCase__ , help='Path to a tab-separated file with gold samples' , )
parser.add_argument(
'--gold_data_mode' , default='qa' , type=lowerCamelCase__ , choices=['qa', 'ans'] , help=(
            'Format of the gold data file. '
            'qa - a single line in the following format: question [tab] answer_list. '
            'ans - a single line of the gold file contains the expected answer string.'
) , )
parser.add_argument(
'--predictions_path' , type=lowerCamelCase__ , default='predictions.txt' , help='Name of the predictions file, to be stored in the checkpoints directory' , )
parser.add_argument(
        '--eval_all_checkpoints' , action='store_true' , help='Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number' , )
parser.add_argument(
'--eval_batch_size' , default=8 , type=lowerCamelCase__ , help='Batch size per GPU/CPU for evaluation.' , )
parser.add_argument(
'--recalculate' , help='Recalculate predictions even if the prediction file exists' , action='store_true' , )
parser.add_argument(
'--num_beams' , default=4 , type=lowerCamelCase__ , help='Number of beams to be used when generating answers' , )
parser.add_argument('--min_length' , default=1 , type=lowerCamelCase__ , help='Min length of the generated answers' )
parser.add_argument('--max_length' , default=50 , type=lowerCamelCase__ , help='Max length of the generated answers' )
parser.add_argument(
'--print_predictions' , action='store_true' , help='If True, prints predictions while evaluating.' , )
parser.add_argument(
        '--print_docs' , action='store_true' , help='If True, prints docs retrieved while generating.' , )
__lowercase = parser.parse_args()
__lowercase = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
return args
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Tuple ) -> Any:
__lowercase = {}
if args.model_type is None:
__lowercase = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('rag' ):
__lowercase = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
__lowercase = args.n_docs
if args.index_name is not None:
__lowercase = args.index_name
if args.index_path is not None:
__lowercase = args.index_path
else:
__lowercase = BartForConditionalGeneration
__lowercase = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('Evaluate the following checkpoints: %s' , lowerCamelCase__ )
__lowercase = get_scores if args.eval_mode == "e2e" else get_precision_at_k
__lowercase = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path ) )
score_fn(lowerCamelCase__ , args.predictions_path , args.gold_data_path )
continue
logger.info('***** Running evaluation for {} *****'.format(lowerCamelCase__ ) )
logger.info(' Batch size = %d' , args.eval_batch_size )
logger.info(' Predictions will be stored under {}'.format(args.predictions_path ) )
if args.model_type.startswith('rag' ):
__lowercase = RagRetriever.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
__lowercase = model_class.from_pretrained(lowerCamelCase__ , retriever=lowerCamelCase__ , **lowerCamelCase__ )
model.retriever.init_retrieval()
else:
__lowercase = model_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
model.to(args.device )
with open(args.evaluation_set , 'r' ) as eval_file, open(args.predictions_path , 'w' ) as preds_file:
__lowercase = []
for line in tqdm(lowerCamelCase__ ):
questions.append(line.strip() )
if len(lowerCamelCase__ ) == args.eval_batch_size:
__lowercase = evaluate_batch_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
preds_file.write('\n'.join(lowerCamelCase__ ) + '\n' )
preds_file.flush()
__lowercase = []
if len(lowerCamelCase__ ) > 0:
__lowercase = evaluate_batch_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
preds_file.write('\n'.join(lowerCamelCase__ ) )
preds_file.flush()
score_fn(lowerCamelCase__ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = get_args()
main(args)
| 325
|
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Optional[int] = []
for part_id in partition_order:
lowercase__ : str = df.where(F"""SPARK_PARTITION_ID() = {part_id}""" ).collect()
for row_idx, row in enumerate(lowerCamelCase__ ):
expected_row_ids_and_row_dicts.append((F"""{part_id}_{row_idx}""", row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : int = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Tuple = spark.range(100 ).repartition(1 )
lowercase__ : Tuple = Spark(lowerCamelCase__ )
    # The id ints will be converted to PyArrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
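# The expected partition count above is pure arithmetic: 100 int64 rows at
# 8 bytes each against a 16-byte shard budget gives 2 rows per partition.
demo_row_bytes = 8
demo_max_shard_size = 16
demo_rows = 100
demo_rows_per_partition = demo_max_shard_size // demo_row_bytes  # 2
demo_expected_partitions = demo_rows // demo_rows_per_partition  # 50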
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Optional[Any] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Tuple = spark.range(10 ).repartition(2 )
lowercase__ : Any = [1, 0]
lowercase__ : Optional[int] = _generate_iterable_examples(lowerCamelCase__ , lowerCamelCase__ ) # Reverse the partitions.
lowercase__ : str = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCamelCase__ , lowerCamelCase__ )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
lowercase__ , lowercase__ : List[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Union[str, Any] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : int = spark.range(10 ).repartition(1 )
lowercase__ : Optional[int] = SparkExamplesIterable(lowerCamelCase__ )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(lowerCamelCase__ ):
assert row_id == F"""0_{i}"""
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Any = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Optional[Any] = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator" ) as generator_mock:
lowercase__ : int = lambda lowerCamelCase__ : x.reverse()
lowercase__ : str = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCamelCase__ , [2, 1, 0] )
lowercase__ : int = SparkExamplesIterable(lowerCamelCase__ ).shuffle_data_sources(lowerCamelCase__ )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(lowerCamelCase__ ):
lowercase__ , lowercase__ : Tuple = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Union[str, Any] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : Optional[Any] = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
lowercase__ : Optional[Any] = SparkExamplesIterable(lowerCamelCase__ ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowercase__ : List[str] = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCamelCase__ , [0, 2] )
for i, (row_id, row_dict) in enumerate(lowerCamelCase__ ):
lowercase__ , lowercase__ : Optional[int] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
lowercase__ : int = SparkExamplesIterable(lowerCamelCase__ ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowercase__ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCamelCase__ , [1, 3] )
for i, (row_id, row_dict) in enumerate(lowerCamelCase__ ):
lowercase__ , lowercase__ : Optional[int] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : int = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
lowercase__ : int = spark.range(100 ).repartition(1 )
lowercase__ : Tuple = Spark(lowerCamelCase__ )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 130
| 0
|
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self , lowerCAmelCase__=0.01 , lowerCAmelCase__=1_000 ) -> Dict:
SCREAMING_SNAKE_CASE = p_stop
SCREAMING_SNAKE_CASE = max_length
def __iter__( self ) -> List[Any]:
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = False
while not stop and count < self.max_length:
yield count
count += 1
SCREAMING_SNAKE_CASE = random.random() < self.p_stop
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False , lowerCAmelCase__=True ) -> List[str]:
SCREAMING_SNAKE_CASE = [
BatchSamplerShard(lowerCAmelCase__ , 2 , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
for i in range(2 )
]
SCREAMING_SNAKE_CASE = [list(lowerCAmelCase__ ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(lowerCAmelCase__ ) for shard in batch_sampler_shards] , [len(lowerCAmelCase__ ) for e in expected] )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __A ( self ) -> str:
# Check the shards when the dataset is a round multiple of total batch size.
SCREAMING_SNAKE_CASE = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
SCREAMING_SNAKE_CASE = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batches.
SCREAMING_SNAKE_CASE = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
        # Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
        # num_processes batches.
SCREAMING_SNAKE_CASE = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ )
def __A ( self ) -> str:
# Check the shards when the dataset is a round multiple of batch size.
SCREAMING_SNAKE_CASE = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
# Check the shards when the dataset is not a round multiple of batch size.
SCREAMING_SNAKE_CASE = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
SCREAMING_SNAKE_CASE = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
def __A ( self ) -> Union[str, Any]:
# Check the shards when the dataset is a round multiple of total batch size.
SCREAMING_SNAKE_CASE = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
SCREAMING_SNAKE_CASE = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batches.
SCREAMING_SNAKE_CASE = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
        # Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
        # num_processes batches.
SCREAMING_SNAKE_CASE = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [[[0, 1]], []]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
def __A ( self ) -> Optional[int]:
# Check the shards when the dataset is a round multiple of batch size.
SCREAMING_SNAKE_CASE = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
# Check the shards when the dataset is not a round multiple of batch size.
SCREAMING_SNAKE_CASE = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
SCREAMING_SNAKE_CASE = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
# Check the shards when the dataset is very small.
SCREAMING_SNAKE_CASE = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [[[0, 1]], []]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase__ , lowerCAmelCase__ , split_batches=lowerCAmelCase__ , even_batches=lowerCAmelCase__ )
def __A ( self ) -> Dict:
SCREAMING_SNAKE_CASE = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
SCREAMING_SNAKE_CASE = [BatchSamplerShard(lowerCAmelCase__ , 2 , lowerCAmelCase__ , even_batches=lowerCAmelCase__ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False , lowerCAmelCase__=2 , lowerCAmelCase__=False ) -> List[str]:
random.seed(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = list(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
IterableDatasetShard(
lowerCAmelCase__ , batch_size=lowerCAmelCase__ , drop_last=lowerCAmelCase__ , num_processes=lowerCAmelCase__ , process_index=lowerCAmelCase__ , split_batches=lowerCAmelCase__ , )
for i in range(lowerCAmelCase__ )
]
SCREAMING_SNAKE_CASE = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(lowerCAmelCase__ )
iterable_dataset_lists.append(list(lowerCAmelCase__ ) )
SCREAMING_SNAKE_CASE = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
SCREAMING_SNAKE_CASE = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) )
self.assertTrue(len(lowerCAmelCase__ ) % shard_batch_size == 0 )
SCREAMING_SNAKE_CASE = []
for idx in range(0 , len(lowerCAmelCase__ ) , lowerCAmelCase__ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(lowerCAmelCase__ ) < len(lowerCAmelCase__ ):
reference += reference
self.assertListEqual(lowerCAmelCase__ , reference[: len(lowerCAmelCase__ )] )
def __A ( self ) -> Tuple:
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = RandomIterableDataset()
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
# Edge case with a very small dataset
SCREAMING_SNAKE_CASE = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
self.check_iterable_dataset_shards(lowerCAmelCase__ , lowerCAmelCase__ , batch_size=4 , drop_last=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
def __A ( self ) -> Dict:
SCREAMING_SNAKE_CASE = BatchSampler(range(16 ) , batch_size=4 , drop_last=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = SkipBatchSampler(lowerCAmelCase__ , 2 )
self.assertListEqual(list(lowerCAmelCase__ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __A ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __A ( self ) -> List[str]:
SCREAMING_SNAKE_CASE = DataLoader(list(range(16 ) ) , batch_size=4 )
SCREAMING_SNAKE_CASE = skip_first_batches(lowerCAmelCase__ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def __A ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(lowerCAmelCase__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(lowerCAmelCase__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def __A ( self ) -> Dict:
Accelerator()
SCREAMING_SNAKE_CASE = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(lowerCAmelCase__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(lowerCAmelCase__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
| 354
|
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def lowercase (SCREAMING_SNAKE_CASE_ : List[str] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location='cpu' )
if "model" in sd.keys():
SCREAMING_SNAKE_CASE = torch.load(SCREAMING_SNAKE_CASE_ , map_location='cpu' )['model']
# pop unnecessary weights
SCREAMING_SNAKE_CASE = [
'decoder.version',
'decoder.output_projection.weight',
]
for key in keys_to_delete:
if key in sd:
sd.pop(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = {
'decoder.project_in_dim.weight': 'decoder.project_in.weight',
'decoder.project_out_dim.weight': 'decoder.project_out.weight',
'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
SCREAMING_SNAKE_CASE = sd.pop(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
SCREAMING_SNAKE_CASE = sd[key]
            # We split the fused QKV weight into separate Q, K, V
SCREAMING_SNAKE_CASE = key.replace('.qkv_proj.' , '.q_proj.' )
SCREAMING_SNAKE_CASE = key.replace('.qkv_proj.' , '.k_proj.' )
SCREAMING_SNAKE_CASE = key.replace('.qkv_proj.' , '.v_proj.' )
SCREAMING_SNAKE_CASE = value.shape[0]
assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` keeps the QKV weight separated as K, V, Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = torch.split(SCREAMING_SNAKE_CASE_ , depth // 3 , dim=0 )
SCREAMING_SNAKE_CASE = q
SCREAMING_SNAKE_CASE = k
SCREAMING_SNAKE_CASE = v
del sd[key]
return sd
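# A shape-only sketch of the QKV split above with stand-in sizes: torch.split
# carves the fused projection into three equal chunks along dim 0, and the
# chunks arrive in K, V, Q order as the comment above notes.
import torch

demo_qkv = torch.randn(3 * 4, 4)  # fused weight with depth = 12
demo_k, demo_v, demo_q = torch.split(demo_qkv, demo_qkv.shape[0] // 3, dim=0)
# each of demo_k, demo_v, demo_q has shape (4, 4)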
@torch.no_grad()
def lowercase (SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int]=None ) -> List[Any]:
SCREAMING_SNAKE_CASE = load_checkpoint(SCREAMING_SNAKE_CASE_ )
if config is not None:
SCREAMING_SNAKE_CASE = OPTConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
else:
SCREAMING_SNAKE_CASE = OPTConfig()
SCREAMING_SNAKE_CASE = OPTModel(SCREAMING_SNAKE_CASE_ ).half().eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check results
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
__UpperCamelCase = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 38
| 0
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : int = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel)` == `config.num_conv_layers` """
F'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
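
# Usage sketch: the no-argument constructor is intended to match the
# speechbrain/m-ctc-t-large hyperparameters, and the kernel/stride tuples must
# line up with `num_conv_layers`, as enforced above:
#
#     config = MCTCTConfig(num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,))
#     assert len(config.conv_kernel) == config.num_conv_layers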
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_703E00, 2.1_107E00, -2.0_811E00, 8.8_685E-01, 2.4_360E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_636E-01, 2.3_478E-01, -1.6_963E00, -1.7_381E00, -8.6_337E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_768E-01, -4.7_429E-01, -1.0_897E00, -1.0_248E00, 3.5_523E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_330E-01, 2.4_211E-01, -6.0_185E-01, -8.2_789E-01, -6.0_446E-02] )
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
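
# Quick check of the remapping on a single hypothetical checkpoint key:
#   create_rename_keys({"network.0.1.pwconv1.weight": None})
# returns
#   [("network.0.1.pwconv1.weight",
#     "swiftformer.encoder.network.0.blocks.1.point_wise_conv1.weight")]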
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
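
# Design note: the conversion validates against precomputed logits from the
# original checkpoints (get_expected_output above) before saving, so a wrong
# key mapping fails the allclose assert instead of silently producing a
# subtly broken model.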
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
"""simple docstring"""
from collections import deque
class Process:
    def __init__(self, process_name, arrival_time, burst_time):
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    def __init__(self, number_of_queues, time_slices, queue, current_time):
        # total number of the MLFQ's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue = deque()
    def calculate_sequence_of_finish_queue(self):
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue):
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue):
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue):
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue):
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process):
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue):
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(self, ready_queue, time_slice):
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self):
        # all queues except the last one use the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
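
# Worked example of one round-robin pass (time slice 17; P1..P4 arrive at t=0
# with burst times 53, 17, 68, 24): P1 runs to t=17 and is requeued with 36
# left, P2 finishes at t=34, P3 is requeued at t=51 with 51 left, and P4 is
# requeued at t=68 with 7 left, so only P2 moves to the finish queue.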
if __name__ == "__main__":
import doctest
SCREAMING_SNAKE_CASE__ = Process("P1", 0, 53)
SCREAMING_SNAKE_CASE__ = Process("P2", 0, 17)
SCREAMING_SNAKE_CASE__ = Process("P3", 0, 68)
SCREAMING_SNAKE_CASE__ = Process("P4", 0, 24)
SCREAMING_SNAKE_CASE__ = 3
SCREAMING_SNAKE_CASE__ = [17, 25]
SCREAMING_SNAKE_CASE__ = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])})
SCREAMING_SNAKE_CASE__ = Process("P1", 0, 53)
SCREAMING_SNAKE_CASE__ = Process("P2", 0, 17)
SCREAMING_SNAKE_CASE__ = Process("P3", 0, 68)
SCREAMING_SNAKE_CASE__ = Process("P4", 0, 24)
SCREAMING_SNAKE_CASE__ = 3
SCREAMING_SNAKE_CASE__ = [17, 25]
SCREAMING_SNAKE_CASE__ = deque([Pa, Pa, Pa, Pa])
SCREAMING_SNAKE_CASE__ = MLFQ(number_of_queues, time_slices, queue, 0)
SCREAMING_SNAKE_CASE__ = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
f"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
"""simple docstring"""
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node
    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
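
# The `visited` list above costs O(n) per membership check, so `has_loop` is
# quadratic overall. A constant-space alternative (a sketch, not part of the
# original API) is Floyd's tortoise-and-hare cycle detection:
def has_loop_floyd(head: Node) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node            # one step
        fast = fast.next_node.next_node  # two steps
        if slow is fast:                 # the pointers can only meet inside a cycle
            return True
    return False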
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = Node(1)
SCREAMING_SNAKE_CASE__ = Node(2)
SCREAMING_SNAKE_CASE__ = Node(3)
SCREAMING_SNAKE_CASE__ = Node(4)
print(root_node.has_loop) # False
SCREAMING_SNAKE_CASE__ = root_node.next_node
print(root_node.has_loop) # True
SCREAMING_SNAKE_CASE__ = Node(5)
SCREAMING_SNAKE_CASE__ = Node(6)
SCREAMING_SNAKE_CASE__ = Node(5)
SCREAMING_SNAKE_CASE__ = Node(6)
print(root_node.has_loop) # False
SCREAMING_SNAKE_CASE__ = Node(1)
print(root_node.has_loop) # False
"""simple docstring"""
import math
def sieve(n: int) -> list:
    """Segmented sieve of Eratosthenes: find all primes up to `n` while only
    keeping O(sqrt(n))-sized boolean segments in memory at a time."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)
    return prime
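
# Smoke test (illustrative): the segmented sieve matches the primes below 30.
assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]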
print(sieve(1_0**6))
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = 'src/diffusers'

# Matches is_xxx_available()
_re_backend = re.compile(R'is\_([a-z_]*)_available\(\)')
# Matches from xxx import bla
_re_single_line_import = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')

DUMMY_CONSTANT = '\n{0} = None\n'

DUMMY_CLASS = '\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n'

DUMMY_FUNCTION = '\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n'
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)
def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, '__init__.py'), 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith('else:'):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', '))
                elif line.startswith(' ' * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
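
# For example (hypothetical object names): "CONSTANT" is all-uppercase and gets
# the DUMMY_CONSTANT template, "load_pipeline" is all-lowercase and gets
# DUMMY_FUNCTION, and a CamelCase name like "UNet2DConditionModel" falls
# through to DUMMY_CLASS -- the dispatch keys purely on the casing of the name.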
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = '[' + ', '.join(F'"{b}"' for b in backend.split('_and_')) + ']'
        dummy_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
        dummy_file += 'from ..utils import DummyObject, requires_backends\n\n'
        dummy_file += '\n'.join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {'torch': 'pt'}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, 'utils')
    dummy_file_paths = {
        backend: os.path.join(path, F'dummy_{short_names.get(backend, backend)}_objects.py')
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, 'r', encoding='utf-8', newline='\n') as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ''

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    F'Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main '
                    '__init__ has new objects.'
                )
                with open(dummy_file_paths[backend], 'w', encoding='utf-8', newline='\n') as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    'The main __init__ has objects that are not present in '
                    F'diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` '
                    'to fix this.'
                )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict['input_ids'].to('cpu'), inputs_dict['attention_mask'].to('cpu'))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, 'traced_model.pt'))
                loaded = torch.jit.load(os.path.join(tmp, 'traced_model.pt'), map_location=torch_device)
                loaded(inputs_dict['input_ids'].to(torch_device), inputs_dict['attention_mask'].to(torch_device))


@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained('distilbert-base-uncased')
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
_CITATION = R"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , )
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 1
|
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
def SCREAMING_SNAKE_CASE ( self : str , **__lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : List[str] = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**__lowerCAmelCase )
return config
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : List[Any]=0 , **__lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : List[str] = dict(self.forward_default_kwargs )
_lowerCamelCase : Union[str, Any] = kwargs.pop('''num_inference_steps''' , __lowerCAmelCase )
_lowerCamelCase : Dict = self.dummy_sample
_lowerCamelCase : Optional[int] = 0.1 * sample
_lowerCamelCase : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_lowerCamelCase : Union[str, Any] = self.get_scheduler_config(**__lowerCAmelCase )
_lowerCamelCase : List[Any] = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residuals
_lowerCamelCase : Optional[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCAmelCase )
_lowerCamelCase : Any = scheduler_class.from_pretrained(__lowerCAmelCase )
new_scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residuals
_lowerCamelCase : List[Any] = dummy_past_residuals[:]
_lowerCamelCase : Tuple = scheduler.step_prk(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
_lowerCamelCase : str = new_scheduler.step_prk(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_lowerCamelCase : int = scheduler.step_plms(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
_lowerCamelCase : List[str] = new_scheduler.step_plms(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : List[Any]=0 , **__lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : List[str] = dict(self.forward_default_kwargs )
_lowerCamelCase : List[str] = kwargs.pop('''num_inference_steps''' , __lowerCAmelCase )
_lowerCamelCase : Any = self.dummy_sample
_lowerCamelCase : Any = 0.1 * sample
_lowerCamelCase : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_lowerCamelCase : int = self.get_scheduler_config()
_lowerCamelCase : List[Any] = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCamelCase : str = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = scheduler_class.from_pretrained(__lowerCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
_lowerCamelCase : List[Any] = dummy_past_residuals[:]
_lowerCamelCase : Any = scheduler.step_prk(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
_lowerCamelCase : Optional[int] = new_scheduler.step_prk(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_lowerCamelCase : Optional[int] = scheduler.step_plms(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
_lowerCamelCase : List[Any] = new_scheduler.step_plms(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE ( self : List[Any] , **__lowerCAmelCase : Optional[int] ):
"""simple docstring"""
_lowerCamelCase : Tuple = self.scheduler_classes[0]
_lowerCamelCase : Any = self.get_scheduler_config(**__lowerCAmelCase )
_lowerCamelCase : Dict = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = 1_0
_lowerCamelCase : Optional[Any] = self.dummy_model()
_lowerCamelCase : Optional[Any] = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCAmelCase )
for i, t in enumerate(scheduler.prk_timesteps ):
_lowerCamelCase : Dict = model(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : List[str] = scheduler.step_prk(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
_lowerCamelCase : Optional[int] = model(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Dict = scheduler.step_plms(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample
return sample
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : str = dict(self.forward_default_kwargs )
_lowerCamelCase : List[str] = kwargs.pop('''num_inference_steps''' , __lowerCAmelCase )
for scheduler_class in self.scheduler_classes:
_lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
_lowerCamelCase : Union[str, Any] = scheduler_class(**__lowerCAmelCase )
_lowerCamelCase : Any = self.dummy_sample
_lowerCamelCase : Tuple = 0.1 * sample
if num_inference_steps is not None and hasattr(__lowerCAmelCase , '''set_timesteps''' ):
scheduler.set_timesteps(__lowerCAmelCase )
elif num_inference_steps is not None and not hasattr(__lowerCAmelCase , '''set_timesteps''' ):
_lowerCamelCase : List[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_lowerCamelCase : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_lowerCamelCase : Dict = dummy_past_residuals[:]
_lowerCamelCase : Any = scheduler.step_prk(__lowerCAmelCase , 0 , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
_lowerCamelCase : str = scheduler.step_prk(__lowerCAmelCase , 1 , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
_lowerCamelCase : Union[str, Any] = scheduler.step_plms(__lowerCAmelCase , 0 , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
_lowerCamelCase : Optional[int] = scheduler.step_plms(__lowerCAmelCase , 1 , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
for timesteps in [1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__lowerCAmelCase )
_lowerCamelCase : Dict = self.scheduler_classes[0]
_lowerCamelCase : List[str] = self.get_scheduler_config(steps_offset=1 )
_lowerCamelCase : int = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(1_0 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1] ) , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ):
self.check_over_configs(beta_start=__lowerCAmelCase , beta_end=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
for t in [1, 5, 1_0]:
self.check_over_forward(time_step=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0] ):
self.check_over_forward(num_inference_steps=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : List[str] = 2_7
for scheduler_class in self.scheduler_classes:
_lowerCamelCase : Dict = self.dummy_sample
_lowerCamelCase : Dict = 0.1 * sample
_lowerCamelCase : Any = self.get_scheduler_config()
_lowerCamelCase : int = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(__lowerCAmelCase )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
_lowerCamelCase : Dict = scheduler.step_prk(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
with self.assertRaises(__lowerCAmelCase ):
_lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCamelCase : Tuple = self.get_scheduler_config()
_lowerCamelCase : Optional[int] = scheduler_class(**__lowerCAmelCase )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : List[Any] = self.full_loop()
_lowerCamelCase : List[str] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : Optional[Any] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_98.13_18 ) < 1E-2
assert abs(result_mean.item() - 0.25_80 ) < 1E-3
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = self.full_loop(prediction_type='''v_prediction''' )
_lowerCamelCase : Optional[Any] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : int = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 67.39_86 ) < 1E-2
assert abs(result_mean.item() - 0.08_78 ) < 1E-3
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Tuple = self.full_loop(set_alpha_to_one=__lowerCAmelCase , beta_start=0.01 )
_lowerCamelCase : List[str] = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : Optional[int] = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 2_30.03_99 ) < 1E-2
assert abs(result_mean.item() - 0.29_95 ) < 1E-3
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : Any = self.full_loop(set_alpha_to_one=__lowerCAmelCase , beta_start=0.01 )
_lowerCamelCase : Dict = torch.sum(torch.abs(__lowerCAmelCase ) )
_lowerCamelCase : Dict = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 1_86.94_82 ) < 1E-2
assert abs(result_mean.item() - 0.24_34 ) < 1E-3
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)
def __call__( self : Dict , __lowerCAmelCase : Union[str, "Image.Image", List[Dict[str, Any]]] , __lowerCAmelCase : Union[str, List[str]] = None , **__lowerCAmelCase : int , ):
"""simple docstring"""
if "text_queries" in kwargs:
_lowerCamelCase : List[Any] = kwargs.pop('''text_queries''' )
if isinstance(__lowerCAmelCase , (str, Image.Image) ):
_lowerCamelCase : Optional[int] = {'''image''': image, '''candidate_labels''': candidate_labels}
else:
_lowerCamelCase : List[Any] = image
_lowerCamelCase : List[str] = super().__call__(__lowerCAmelCase , **__lowerCAmelCase )
return results
    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params
    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results
    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Turn a list [xmin, ymin, xmax, ymax] into a dict { "xmin": xmin, ... }"""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
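
# Usage sketch via the high-level `pipeline` factory; the checkpoint and the
# candidate labels are illustrative assumptions (OWL-ViT is a commonly used
# zero-shot detection model).
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
    predictions = detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote control"],
    )
    for prediction in predictions:
        # Each prediction carries a label, a confidence score and a pixel box.
        print(prediction["label"], round(prediction["score"], 3), prediction["box"])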
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)
    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        # 1. Check inputs. Raise error if strength is not valid
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
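
# Usage sketch, assuming an unconditional 256x256 diffusion checkpoint; the
# checkpoint name, input path and strength value are illustrative assumptions.
if __name__ == "__main__":
    from diffusers import UNet2DModel

    unet = UNet2DModel.from_pretrained("google/ddpm-cat-256")  # illustrative checkpoint
    # Any scheduler works here: __init__ converts it to DDIM via from_config.
    pipe = DDIMNoiseComparativeAnalysisPipeline(unet=unet, scheduler=DDIMScheduler())
    pipe.to("cuda" if torch.cuda.is_available() else "cpu")

    init_image = PIL.Image.open("input.png")  # illustrative input image
    images, latent_timestep = pipe(image=init_image, strength=0.5, return_dict=False)
    images[0].save("output.png")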
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    """Check whether next_ver can extend the current partial path."""
    # 1. Validate that current and next vertices are adjacent
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    """Recursive helper: try to fill path[curr_ind:] so that it closes into a Hamiltonian cycle."""
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    """Return a Hamiltonian cycle starting at start_index, or [] if none exists."""
    # Initialize path with -1, indicating that we have not visited the vertices yet
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
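
# Example usage for the functions above; the adjacency matrix is illustrative.
if __name__ == "__main__":
    # Undirected 5-vertex graph that contains a Hamiltonian cycle.
    graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(graph))  # [0, 1, 2, 4, 3, 0]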
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.last_hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetVaModel,
            "image-classification": MobileNetVaForImageClassification,
            "image-segmentation": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
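
# Minimal inference sketch using the real library names: this test file's
# `MobileNetVa*` identifiers correspond to `MobileNetV2*` in transformers.
# The input image path is an illustrative assumption.
if __name__ == "__main__":
    import torch
    from PIL import Image
    from transformers import MobileNetV2ForImageClassification, MobileNetV2ImageProcessor

    processor = MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224")
    model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224")

    image = Image.open("cats.png")  # illustrative input image
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print(model.config.id2label[logits.argmax(-1).item()])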
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6, metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train."""

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None, metadata={"help": "Stride to use for the encoder."},
    )
class MaskGenerator:
    """Generates a random boolean mask over image patches for masked image modeling."""

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
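
# Shape sanity check with the defaults above: rand_size = 192 // 32 = 6 and
# scale = 32 // 4 = 8, so token_count = 36 and mask_count = ceil(36 * 0.6) = 22;
# the flattened mask therefore has (6 * 8) ** 2 = 2304 entries (one per model
# patch), of which 22 * 8 * 8 = 1408 are set to 1.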
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying the transforms and creating a corresponding mask,
        indicating which patches to mask."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
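
# Illustrative invocation; the flags map onto the dataclasses above plus
# `TrainingArguments`, and the values are examples rather than recommendations:
#
#   python run_mim.py \
#       --model_type vit \
#       --dataset_name cifar10 \
#       --output_dir ./simmim-outputs \
#       --do_train \
#       --do_eval \
#       --overwrite_output_dir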