"""Logging utilities for the `datasets` library."""
import logging
import os
from logging import (
    CRITICAL,  # NOQA
    DEBUG,  # NOQA
    ERROR,  # NOQA
    FATAL,  # NOQA
    INFO,  # NOQA
    NOTSET,  # NOQA
    WARN,  # NOQA
    WARNING,  # NOQA
)
from typing import Optional

from tqdm import auto as tqdm_lib


log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING


def _get_default_logging_level():
    """Return the default log level, overridable with the DATASETS_VERBOSITY env var."""
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name, defaulting to the library root logger."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()


class EmptyTqdm:
    """Dummy tqdm that does nothing, used when progress bars are disabled."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for any missing attribute."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
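# A minimal hedged usage sketch of the helpers above (illustrative only):
#
#     logger = get_logger(__name__)
#     set_verbosity(INFO)        # or set_verbosity_info()
#     logger.info("dataset loaded")
#     disable_progress_bar()     # tqdm(...) now returns the no-op EmptyTqdm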
"""Bifid cipher: a Polybius square combined with a fractionation step."""
import numpy as np

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-based (row, column) indexes of `letter` in the square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at the given 1-based (row, column) position."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        """Encode `message`: spaces are removed and "j" is mapped to "i"."""
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        # write the row/column coordinates of each letter into two rows
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        # read the coordinates back off row by row (the fractionation step)
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        """Decode a message produced by `encode`."""
        message = message.lower()
        message = message.replace(" ", "")

        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
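# A short usage sketch (the message is made up for this demo): encode/decode
# round-trip, with "j" mapped to "i" and spaces removed by encode().
if __name__ == "__main__":
    cipher = BifidCipher()
    ciphertext = cipher.encode("testmessage")
    print(ciphertext)
    print(cipher.decode(ciphertext))  # -> "testmessage"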
"""Newton's forward interpolation over equally spaced sample points."""
from __future__ import annotations

import math


def ucal(u: float, p: int) -> float:
    """Return the product u * (u - 1) * ... * (u - p + 1) used by the forward formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(0)

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
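# A minimal non-interactive check of the same forward-difference scheme (the
# sample data below is made up for this demo): for f(x) = x**2 sampled at
# x = 0, 1, 2, 3 the interpolated value at 1.5 should come out as 2.25.
def _forward_interpolate(x: list[float], fx: list[float], value: float) -> float:
    n = len(x)
    table = [[0.0] * n for _ in range(n)]
    for i in range(n):
        table[i][0] = fx[i]
    # forward difference table, same recurrence as in main()
    for i in range(1, n):
        for j in range(n - i):
            table[j][i] = table[j + 1][i - 1] - table[j][i - 1]
    u = (value - x[0]) / (x[1] - x[0])
    result = table[0][0]
    for i in range(1, n):
        result += ucal(u, i) * table[0][i] / math.factorial(i)
    return result


# _forward_interpolate([0, 1, 2, 3], [0, 1, 4, 9], 1.5) == 2.25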
"""Compute n choose k with factorials."""
from math import factorial


def combinations(n: int, k: int) -> int:
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )
    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )
    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
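# A small hedged sanity check added for illustration; the expected values are
# the standard results 52C5 = 2,598,960 and 40C4 = 91,390.
def _demo_combinations() -> None:
    assert combinations(52, 5) == 2_598_960
    assert combinations(40, 4) == 91_390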
"""Tests for the Camembert tokenizers (mirrors the transformers test suite layout)."""
import unittest

from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"


@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_005)

    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[5, 54, 7_196, 297, 30, 23, 776, 18, 11, 3_215, 3_705, 8_252, 22, 3_164, 1_181, 2_116, 29, 16, 813, 25, 791, 3_314, 20, 3_446, 38, 27_575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9_088, 20, 1_517, 8, 22_804, 18_818, 10, 38, 629, 607, 607, 142, 19, 7_196, 867, 56, 10_326, 24, 2_267, 20, 416, 5_072, 15_612, 233, 734, 7, 2_399, 27, 16, 3_015, 1_649, 7, 24, 20, 4_338, 2_399, 27, 13, 3_400, 14, 13, 6_189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
"""Fast tokenizer for NLLB (mirrors transformers' tokenization_nllb_fast.py)."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from tokenizers import processors

from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
# fmt: on


class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        )
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self,
        raw_inputs,
        return_tensors: Optional[str],
        src_lang: Optional[str],
        tgt_lang: Optional[str],
        **extra_kwargs,
    ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source-lang setting: in legacy mode no prefix and
        suffix=[eos, src_lang_code]; otherwise prefix=[src_lang_code] and suffix=[eos]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target-lang setting, mirroring the source-lang variant above."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)

        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
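# A hedged usage sketch, left commented out (requires downloading the
# "facebook/nllb-200-distilled-600M" checkpoint; the sentence is illustrative):
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     batch = tokenizer("Hello world", return_tensors="pt")
#     # with legacy_behaviour=False the input_ids are prefixed with the
#     # eng_Latn language-code token and suffixed with </s>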
"""Count integer partitions with a bottom-up memoization table."""


def partition(m: int) -> int:
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
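# Hedged sanity check added for illustration: partition(5) should be 7,
# counting 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1 and 1+1+1+1+1.
def _demo_partition() -> None:
    assert partition(5) == 7
    print(partition(10))  # the standard value of p(10) is 42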
"""Flip-augment a YOLO-format dataset: mirror images and their bounding boxes."""
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Pair every label file in `label_dir` with its image and parse the YOLO boxes."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    """Flip every image and mirror the normalized box centers accordingly."""
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:  # horizontal flip: mirror the x centers
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:  # vertical flip: mirror the y centers
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    """Generate a random string of lowercase letters and digits."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
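# Hedged illustration of the annotation math above (the box is made up):
# a horizontal flip (flip_type == 1) mirrors the normalized x-center.
def _demo_flip_coords() -> None:
    bbox = [0, 0.25, 0.4, 0.1, 0.2]  # class, x_center, y_center, width, height
    flipped = [bbox[0], 1 - bbox[1], bbox[2], bbox[3], bbox[4]]
    assert flipped[1] == 0.75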
"""Tests for the PyTorch ViT model (mirrors the transformers test suite layout)."""
import inspect
import unittest

from transformers import ViTConfig
from transformers.testing_utils import (
    require_accelerate,
    require_torch,
    require_torch_gpu,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
    from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
"""Scrape Amazon search results for a product into a pandas DataFrame."""
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Take a product name as input and return product information in a DataFrame."""
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
    # Blank out rows where the scraped price exceeds the MRP (bad parses)
    data_frame.loc[
        data_frame["Current Price of the product"] > data_frame["MRP of the product"],
        "MRP of the product",
    ] = " "
    data_frame.loc[
        data_frame["Current Price of the product"] > data_frame["MRP of the product"],
        "Current Price of the product",
    ] = " "
    data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
"""Liouville lambda function."""
from maths.prime_factors import prime_factors


def liouville_lambda(number: int) -> int:
    """
    Return the Liouville lambda of `number`: 1 when the number of prime factors
    of `number` (counted with multiplicity) is even, and -1 when it is odd.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
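# A small hedged check added for illustration (standard number-theory values):
# 10 = 2 * 5 has an even number of prime factors, 11 is prime (odd count).
def _demo_liouville() -> None:
    assert liouville_lambda(10) == 1
    assert liouville_lambda(11) == -1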
"""PyTorch-specific benchmark arguments (mirrors transformers' benchmark_args.py)."""
from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


if is_torch_available():
    import torch

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
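# A hedged usage sketch, left commented out (the model name and sizes are
# illustrative; assumes transformers' PyTorchBenchmark runner):
#
#     from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
#
#     args = PyTorchBenchmarkArguments(
#         models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
#     )
#     benchmark = PyTorchBenchmark(args)
#     results = benchmark.run()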
"""Tests for the TensorFlow ESM model (mirrors the transformers test suite layout)."""
from __future__ import annotations

import unittest

from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import numpy
    import tensorflow as tf

    from transformers.models.esm.modeling_tf_esm import (
        TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFEsmForMaskedLM,
        TFEsmForSequenceClassification,
        TFEsmForTokenClassification,
        TFEsmModel,
    )


class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True

        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None


@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowerCAmelCase :
__lowerCamelCase = 42
# setable values
__lowerCamelCase = 42
__lowerCamelCase = 42
__lowerCamelCase = None
@classmethod
def UpperCAmelCase ( cls :Union[str, Any] , _lowercase :CommonSchedulerState , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray ):
'''simple docstring'''
return cls(common=_lowercase , init_noise_sigma=_lowercase , timesteps=_lowercase )
@dataclass
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 42
class lowerCAmelCase ( lowercase_ , lowercase_ ):
__lowerCamelCase = [e.name for e in FlaxKarrasDiffusionSchedulers]
__lowerCamelCase = 42
@property
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
return True
@register_to_config
def __init__( self :str , _lowercase :int = 10_00 , _lowercase :float = 0.0001 , _lowercase :float = 0.02 , _lowercase :str = "linear" , _lowercase :Optional[jnp.ndarray] = None , _lowercase :str = "fixed_small" , _lowercase :bool = True , _lowercase :str = "epsilon" , _lowercase :jnp.dtype = jnp.floataa , ):
'''simple docstring'''
lowercase__ = dtype
    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )

    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        return sample

    def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape=()) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_steps is a power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )

    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ):
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                "or `v_prediction` for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
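# Illustrative sketch (not part of the original file): how the methods above are
# typically driven in a denoising loop. The `FlaxDDPMScheduler` construction and
# the `model_fn` denoiser below are assumptions for illustration only.
#
# scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
# state = scheduler.create_state()
# state = scheduler.set_timesteps(state, num_inference_steps=50)
# key = jax.random.PRNGKey(0)
# sample = jax.random.normal(key, (1, 3, 32, 32)) * state.init_noise_sigma
# for t in state.timesteps:
#     model_output = model_fn(sample, t)  # hypothetical denoising model
#     key, step_key = jax.random.split(key)
#     output = scheduler.step(state, model_output, t, sample, key=step_key)
#     sample = output.prev_sample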
| 655
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
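# Illustrative sketch (not part of the original file): constructing the config and
# reading a canonical attribute that `attribute_map` above redirects to the
# GPT-2 style name.
#
# config = DecisionTransformerConfig(state_dim=11, act_dim=3)
# assert config.hidden_size == 128  # default above
# assert config.max_position_embeddings == config.n_positions  # via attribute_map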
| 710
|
'''simple docstring'''
from __future__ import annotations
END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str):
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 449
| 0
|
from ....utils import logging
snake_case_ = logging.get_logger(__name__)
class MMBTConfig:
    """Configuration wrapper that copies an underlying text config's attributes
    and adds the multimodal-specific fields."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 592
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 592
| 1
|
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(
        self,
        value_function: UNetaDModel,
        unet: UNetaDModel,
        scheduler: DDPMScheduler,
        env,
    ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_xa(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_xa(x, conditions, self.action_dim)
            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_xa(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        xa = randn_tensor(shape, device=self.unet.device)
        x = self.reset_xa(xa, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
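# Illustrative sketch (not part of the original file): rough shape of a rollout
# loop with this pipeline. The checkpoint name and the gym-style env API are
# assumptions for illustration, not taken from the file above.
#
# pipeline = ValueGuidedRLPipeline.from_pretrained(
#     "bglick13/hopper-medium-v2-value-function-hor32", env=env
# )
# obs = env.reset()
# for _ in range(100):
#     action = pipeline(obs, planning_horizon=32)
#     obs, reward, done, info = env.step(action)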
| 2
|
"""simple docstring"""
from math import factorial
def solution(n: int = 20) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3, ...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
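# Quick sanity check (illustrative, not part of the original file): the number of
# lattice paths through an n x n grid is the central binomial coefficient C(2n, n);
# for the 20 x 20 grid of Project Euler problem 15 this is 137846528820.
#
# assert solution(1) == 2  # two routes through a 1 x 1 grid
# assert solution(20) == 137846528820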
| 2
| 1
|
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)


class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
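# Illustrative sketch (not part of the original file): wiring the retriever to Ray
# actors. The `ray.remote` wrapping and worker count are assumptions that mirror
# the RAG example scripts; the checkpoint name is only an example.
#
# ray.init()
# RemoteRetriever = ray.remote(RayRetriever)
# workers = [RemoteRetriever.remote() for _ in range(2)]
# retriever = RagRayDistributedRetriever.from_pretrained(
#     "facebook/rag-token-nq", actor_handles=workers
# )
# retriever.init_retrieval()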
| 57
|
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class lowercase__ ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(lowerCamelCase__ ):
_UpperCamelCase : Optional[int] = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
_UpperCamelCase : Dict = FlaxAutoModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(lowerCamelCase__ ):
_UpperCamelCase : Any = AutoConfig.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
_UpperCamelCase : Any = FlaxAutoModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ )
@slow
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
_UpperCamelCase : Optional[int] = AutoTokenizer.from_pretrained(lowerCamelCase__ )
_UpperCamelCase : List[Any] = FlaxBertModel.from_pretrained(lowerCamelCase__ )
_UpperCamelCase : Optional[int] = tokenizer('Do you support jax jitted function?' ,return_tensors=TensorType.JAX )
@jax.jit
def eval(**lowerCamelCase__ : Union[str, Any] ):
return model(**lowerCamelCase__ )
eval(**lowerCamelCase__ ).block_until_ready()
@slow
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
_UpperCamelCase : Optional[int] = AutoTokenizer.from_pretrained(lowerCamelCase__ )
_UpperCamelCase : Tuple = FlaxRobertaModel.from_pretrained(lowerCamelCase__ )
_UpperCamelCase : Optional[Any] = tokenizer('Do you support jax jitted function?' ,return_tensors=TensorType.JAX )
@jax.jit
def eval(**lowerCamelCase__ : Union[str, Any] ):
return model(**lowerCamelCase__ )
eval(**lowerCamelCase__ ).block_until_ready()
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase__ ,'bert-base is not a local folder and is not a valid model identifier' ):
_UpperCamelCase : int = FlaxAutoModel.from_pretrained('bert-base' )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase__ ,R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
_UpperCamelCase : Tuple = FlaxAutoModel.from_pretrained(lowerCamelCase__ ,revision='aaaaaa' )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase__ ,'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack' ,):
_UpperCamelCase : List[Any] = FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
with self.assertRaisesRegex(lowerCamelCase__ ,'Use `from_pt=True` to load this model' ):
_UpperCamelCase : Tuple = FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
| 195
| 0
|
'''simple docstring'''
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)
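# Worked example (illustrative, not part of the original file): the recursion is
# Euclid's algorithm, replacing (a, b) with (b % a, a) until a reaches 0:
# greatest_common_divisor(4, 8) -> greatest_common_divisor(0, 4) -> 4
#
# assert greatest_common_divisor(4, 8) == 4
# assert greatest_common_divisor(3, 5) == 1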
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key)

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch

        return decrypted
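# Illustrative usage (not part of the original file): a 2x2 key gives break_key = 2,
# so text is padded and processed in blocks of two alphanumeric characters. The key
# below is only an example; any key whose determinant is coprime with 36 works.
#
# hc = HillCipher(numpy.array([[2, 5], [1, 6]]))  # det = 7, gcd(7, 36) == 1
# ciphertext = hc.encrypt("testing hill cipher")
# assert hc.decrypt(ciphertext).startswith("TESTINGHILLCIPHER")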
def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 624
|
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = '''Hello world! cécé herlolip'''

BertAbsConfig = namedtuple(
'''BertAbsConfig''',
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Copy/paste and tweak the pre-trained weights provided by the creators
    of BertAbs for the internal architecture."""
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ----------------------------------
    # Make sure the outputs are identical
    # ----------------------------------

    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between outputs: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between outputs: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
    args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 624
| 1
|
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'tokenizer_config_file': 'tokenizer_config.json',
    'merges_file': 'merges.txt',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
        ),
    },
    'tokenizer_config_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
        ),
    },
    'merges_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
        ),
    },
}

BPE_TOKEN_MERGES = '</w>'
BPE_TOKEN_VOCAB = '@@ '
def get_pairs(word):
    """
    Return the set of symbol pairs in a word, where the word is represented as a
    tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
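# Illustrative example (not part of the original file): for the symbol tuple
# ("l", "o", "w") the function yields every adjacent pair, which is what the
# BPE merge loop below ranks against `bpe_ranks`.
#
# assert get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}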
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/s2t-wav2vec2-large-en-de': 1024}


class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f'''No merges files provided. {self.__class__.__name__} can only be used for decoding.''')
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding='utf-8') as merges_handle:
                merges = merges_handle.read().split('\n')[:-1]

            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, '')

        word = word.replace(' ', BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                'This tokenizer was instantiated without a `merges.txt` file, so'
                ' that it can only be used for decoding, not for encoding. '
                'Make sure to provide `merges.txt` file at instantiation to enable '
                'encoding.'
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(' ')))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = ' '.join(tokens)

        # make sure @@ tokens are concatenated
        string = ''.join(string.split(BPE_TOKEN_VOCAB))

        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']
        )

        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)

        with open(merges_file, 'w', encoding='utf-8') as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
                        ' Please check that the tokenizer is not corrupted!'
                    )
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1

        return (vocab_file, merges_file)
| 97
|
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch(gpta_checkpoint_path, gpta_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file)
    model = GPTaModel(config)

    # Load weights from numpy
    load_tf_weights_in_gpta(model, config, gpta_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(F'Save PyTorch model to {pytorch_weights_dump_path}')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(F'Save configuration file to {pytorch_config_dump_path}')
    with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
    args = parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
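# Illustrative programmatic use (not part of the original file): the same conversion
# without the CLI. The paths below are placeholders, not real files.
#
# convert_gpta_checkpoint_to_pytorch(
#     "/path/to/gpt2/model.ckpt",  # TensorFlow checkpoint (placeholder)
#     "",                          # empty string -> default GPTaConfig()
#     "/path/to/output",           # output folder (placeholder)
# )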
| 688
| 0
|
def binary_count_setbits(a: int) -> int:
    """
    Count the number of set bits (1 bits) in the binary representation of a
    non-negative integer.
    """
    if a < 0:
        raise ValueError('Input value must be a positive integer')
    elif isinstance(a, float):
        raise TypeError('Input value must be a \'int\' type')
    return bin(a).count('1')
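# Worked examples (illustrative, not part of the original file):
# bin(25) == "0b11001", which contains three "1" characters.
#
# assert binary_count_setbits(25) == 3
# assert binary_count_setbits(0) == 0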
if __name__ == "__main__":
import doctest
doctest.testmod()
| 567
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_257,
        n_positions=1_024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        attention_softmax_in_fpaa=True,
        scale_attention_softmax_in_fpaa=True,
        multi_query=True,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fpaa = attention_softmax_in_fpaa
        self.scale_attention_softmax_in_fpaa = scale_attention_softmax_in_fpaa
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
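# Illustrative sketch (not part of the original file): `attribute_map` lets callers
# use canonical names; reading `hidden_size` transparently returns `n_embd`.
#
# config = GPTBigCodeConfig(n_embd=768)
# assert config.hidden_size == 768
# assert config.multi_query is True  # default above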
| 567
| 1
|
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self, lowerCamelCase__, lowerCamelCase__=14, lowerCamelCase__=7, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=99, lowerCamelCase__=32, lowerCamelCase__=5, lowerCamelCase__=4, lowerCamelCase__=37, lowerCamelCase__="gelu", lowerCamelCase__=0.1, lowerCamelCase__=0.1, lowerCamelCase__=512, lowerCamelCase__=16, lowerCamelCase__=2, lowerCamelCase__=0.02, lowerCamelCase__=3, lowerCamelCase__=4, lowerCamelCase__=None, ):
A : Union[str, Any] = parent
A : Optional[Any] = batch_size
A : List[Any] = seq_length
A : Tuple = is_training
A : Optional[Any] = use_token_type_ids
A : List[str] = use_input_mask
A : List[str] = use_labels
A : Any = use_mc_token_ids
A : Optional[Any] = vocab_size
A : str = hidden_size
A : str = num_hidden_layers
A : int = num_attention_heads
A : List[Any] = intermediate_size
A : Dict = hidden_act
A : Any = hidden_dropout_prob
A : int = attention_probs_dropout_prob
A : str = max_position_embeddings
A : Optional[int] = type_vocab_size
A : Union[str, Any] = type_sequence_label_size
A : Dict = initializer_range
A : Union[str, Any] = num_labels
A : Any = num_choices
A : Any = scope
A : Any = self.vocab_size - 1
def _lowerCAmelCase ( self ):
A : List[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
A : Tuple = None
if self.use_input_mask:
A : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
A : Dict = None
if self.use_token_type_ids:
A : int = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
A : Any = None
if self.use_mc_token_ids:
A : Optional[Any] = ids_tensor([self.batch_size, self.num_choices], self.seq_length )
A : Union[str, Any] = None
A : Union[str, Any] = None
A : Any = None
if self.use_labels:
A : str = ids_tensor([self.batch_size], self.type_sequence_label_size )
A : Optional[int] = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
A : List[Any] = ids_tensor([self.batch_size], self.num_choices )
A : Tuple = self.get_config()
A : Tuple = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _lowerCAmelCase ( self ):
return CTRLConfig(
vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, *lowerCamelCase__ ):
A : int = CTRLModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
model(_lowerCAmelCase, token_type_ids=_lowerCAmelCase, head_mask=_lowerCAmelCase )
model(_lowerCAmelCase, token_type_ids=_lowerCAmelCase )
A : Dict = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ), config.n_layer )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, *lowerCamelCase__ ):
A : str = CTRLLMHeadModel(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
A : Any = model(_lowerCAmelCase, token_type_ids=_lowerCAmelCase, labels=_lowerCAmelCase )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, *lowerCamelCase__ ):
A : Optional[Any] = self.num_labels
A : str = CTRLForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
A : Optional[int] = ids_tensor([self.batch_size], self.type_sequence_label_size )
A : Dict = model(_lowerCAmelCase, token_type_ids=_lowerCAmelCase, labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Any = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
__lowerCamelCase : Optional[int] = (CTRLLMHeadModel,) if is_torch_available() else ()
__lowerCamelCase : List[Any] = (
{
'feature-extraction': CTRLModel,
'text-classification': CTRLForSequenceClassification,
'text-generation': CTRLLMHeadModel,
'zero-shot': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCamelCase : Dict = True
__lowerCamelCase : Union[str, Any] = False
__lowerCamelCase : str = False
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def _lowerCAmelCase ( self ):
A : List[Any] = CTRLModelTester(self )
A : Tuple = ConfigTester(self, config_class=_lowerCAmelCase, n_embd=37 )
def _lowerCAmelCase ( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ):
A : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*_lowerCAmelCase )
def _lowerCAmelCase ( self ):
A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_lowerCAmelCase )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _lowerCAmelCase ( self ):
pass
@slow
def _lowerCAmelCase ( self ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : Tuple = CTRLModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def _lowerCAmelCase ( self ):
pass
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def _lowerCAmelCase ( self ):
A : Optional[int] = CTRLLMHeadModel.from_pretrained("""ctrl""" )
model.to(_lowerCAmelCase )
A : Dict = torch.tensor(
[[1_1859, 0, 1611, 8]], dtype=torch.long, device=_lowerCAmelCase ) # Legal the president is
A : Optional[int] = [
1_1859,
0,
1611,
8,
5,
150,
2_6449,
2,
19,
348,
469,
3,
2595,
48,
2_0740,
24_6533,
24_6533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
A : Union[str, Any] = model.generate(_lowerCAmelCase, do_sample=_lowerCAmelCase )
self.assertListEqual(output_ids[0].tolist(), _lowerCAmelCase )
| 662
|
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
def __init__( self : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : int=13 , _lowerCAmelCase : Any=32 , _lowerCAmelCase : str=3 , _lowerCAmelCase : int=4 , _lowerCAmelCase : Optional[int]=[10, 20, 30, 40] , _lowerCAmelCase : Optional[Any]=[2, 2, 3, 2] , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : int=True , _lowerCAmelCase : List[str]=37 , _lowerCAmelCase : List[str]="gelu" , _lowerCAmelCase : List[Any]=10 , _lowerCAmelCase : int=0.02 , _lowerCAmelCase : str=["stage2", "stage3", "stage4"] , _lowerCAmelCase : Dict=[2, 3, 4] , _lowerCAmelCase : Tuple=None , ) -> Any:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = num_channels
__lowercase = num_stages
__lowercase = hidden_sizes
__lowercase = depths
__lowercase = is_training
__lowercase = use_labels
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = num_labels
__lowercase = initializer_range
__lowercase = out_features
__lowercase = out_indices
__lowercase = scope
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.num_labels )
__lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self : List[str] ) -> Any:
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _a ( self : Optional[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
__lowercase = ConvNextModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _a ( self : Optional[int] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase = ConvNextForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = ConvNextBackbone(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__lowercase = None
__lowercase = ConvNextBackbone(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _a ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :Optional[Any] = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
__snake_case :List[str] = (
{'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
if is_torch_available()
else {}
)
__snake_case :str = True
__snake_case :Any = False
__snake_case :Any = False
__snake_case :Any = False
__snake_case :int = False
def _a ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase = ConvNextModelTester(self )
__lowercase = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def _a ( self : Optional[Any] ) -> int:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def _a ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def _a ( self : Dict ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def _a ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
pass
def _a ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCAmelCase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowerCAmelCase )
def _a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
def check_hidden_states_output(_lowerCAmelCase : Tuple , _lowerCAmelCase : int , _lowerCAmelCase : Optional[int] ):
__lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
__lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowercase = self.model_tester.num_stages
self.assertEqual(len(_lowerCAmelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def _a ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = ConvNextModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def prepare_img():
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def _a ( self : Tuple ) -> Any:
"""simple docstring"""
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def _a ( self : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(_lowerCAmelCase )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
__lowercase = model(**_lowerCAmelCase )
# verify the logits
__lowercase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
__lowercase = torch.tensor([-0.0_260, -0.4_739, 0.1_911] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
@require_torch
class __UpperCamelCase ( unittest.TestCase , _lowerCAmelCase ):
__snake_case :Union[str, Any] = (ConvNextBackbone,) if is_torch_available() else ()
__snake_case :str = ConvNextConfig
__snake_case :Optional[Any] = False
def _a ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase = ConvNextModelTester(self )
| 80
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SCREAMING_SNAKE_CASE ( a__ , a__ ):
'''simple docstring'''
__UpperCamelCase = "swin"
__UpperCamelCase = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , SCREAMING_SNAKE_CASE__=2_24 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=96 , SCREAMING_SNAKE_CASE__=[2, 2, 6, 2] , SCREAMING_SNAKE_CASE__=[3, 6, 12, 24] , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=4.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-5 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
super().__init__(**_A )
snake_case: str = image_size
snake_case: List[str] = patch_size
snake_case: str = num_channels
snake_case: Optional[Any] = embed_dim
snake_case: Optional[int] = depths
snake_case: Any = len(_A )
snake_case: Union[str, Any] = num_heads
snake_case: Dict = window_size
snake_case: Dict = mlp_ratio
snake_case: Tuple = qkv_bias
snake_case: Any = hidden_dropout_prob
snake_case: List[str] = attention_probs_dropout_prob
snake_case: int = drop_path_rate
snake_case: int = hidden_act
snake_case: Tuple = use_absolute_embeddings
snake_case: List[Any] = layer_norm_eps
snake_case: Optional[Any] = initializer_range
snake_case: Optional[Any] = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case: Union[str, Any] = int(embed_dim * 2 ** (len(_A ) - 1) )
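# e.g. the defaults embed_dim=96 and depths=[2, 2, 6, 2] give 96 * 2 ** 3 = 768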
snake_case: Optional[Any] = ['stem'] + [F"""stage{idx}""" for idx in range(1 , len(_A ) + 1 )]
snake_case: Tuple = get_aligned_output_features_output_indices(
out_features=_A , out_indices=_A , stage_names=self.stage_names )
class SCREAMING_SNAKE_CASE ( a__ ):
'''simple docstring'''
__UpperCamelCase = version.parse("1.11" )
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
return 1E-4
| 716
|
'''simple docstring'''
def longest_distance(graph: dict[int, list[int]]) -> None:
    """Print the number of vertices on the longest path in a DAG, using Kahn's topological sort."""
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)  # every vertex is a path of one vertex by itself
    # count incoming edges for every vertex
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    # seed the queue with all source vertices (in-degree zero)
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    # process vertices in topological order, relaxing path lengths along each edge
    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)
    print(max(long_dist))

# Adjacency list of the graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
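# For this adjacency list the longest chain is 0 -> 2 -> 5 -> 6 -> 7
# (five vertices), so the script prints 5.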
| 692
| 0
|
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import List, Optional
class lowerCAmelCase__ ( A_ ):
def __init__( self : Optional[int] ):
# test for the above condition
self.test()
def lowercase ( self : List[str] ):
_snake_case = 0
_snake_case = False
while not completed:
if counter == 1:
self.reset()
_snake_case = self.advance()
if not self.does_advance(_lowerCamelCase ):
raise Exception(
'''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
_snake_case , _snake_case , _snake_case = self.update(_lowerCamelCase )
counter += 1
if counter > 10000:
raise Exception('''update() does not fulfill the constraint.''' )
if self.remaining() != 0:
raise Exception('''Custom Constraint is not defined correctly.''' )
@abstractmethod
def lowercase ( self : str ):
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def lowercase ( self : str , _lowerCamelCase : int ):
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def lowercase ( self : Tuple , _lowerCamelCase : int ):
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def lowercase ( self : int ):
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def lowercase ( self : Any ):
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def lowercase ( self : Dict , _lowerCamelCase : Union[str, Any]=False ):
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class lowerCAmelCase__ ( A_ ):
def __init__( self : Union[str, Any] , _lowerCamelCase : List[int] ):
super(_lowerCamelCase , self ).__init__()
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or len(_lowerCamelCase ) == 0:
raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
if any((not isinstance(_lowerCamelCase , _lowerCamelCase ) or token_id < 0) for token_id in token_ids ):
raise ValueError(f'''`token_ids` has to be a list of non-negative integers, but is {token_ids}.''' )
_snake_case = token_ids
_snake_case = len(self.token_ids )
_snake_case = -1 # the index of the currently fulfilled step
_snake_case = False
def lowercase ( self : List[Any] ):
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def lowercase ( self : Any , _lowerCamelCase : int ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCamelCase )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def lowercase ( self : str , _lowerCamelCase : int ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_lowerCamelCase )}''' )
_snake_case = False
_snake_case = False
_snake_case = False
if self.does_advance(_lowerCamelCase ):
self.fulfilled_idx += 1
_snake_case = True
if self.fulfilled_idx == (self.seqlen - 1):
_snake_case = True
_snake_case = completed
else:
# failed to make progress.
_snake_case = True
self.reset()
return stepped, completed, reset
def lowercase ( self : int ):
_snake_case = False
_snake_case = 0
def lowercase ( self : Any ):
return self.seqlen - (self.fulfilled_idx + 1)
def lowercase ( self : Tuple , _lowerCamelCase : int=False ):
_snake_case = PhrasalConstraint(self.token_ids )
if stateful:
_snake_case = self.seqlen
_snake_case = self.fulfilled_idx
_snake_case = self.completed
return new_constraint
class lowerCAmelCase__ :
def __init__( self : Optional[int] , _lowerCamelCase : List[List[int]] , _lowerCamelCase : int=True ):
_snake_case = max([len(_lowerCamelCase ) for one in nested_token_ids] )
_snake_case = {}
for token_ids in nested_token_ids:
_snake_case = root
for tidx, token_id in enumerate(_lowerCamelCase ):
if token_id not in level:
_snake_case = {}
_snake_case = level[token_id]
if no_subsets and self.has_subsets(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(
'''Each list in `nested_token_ids` can't be a complete subset of another list, but is'''
f''' {nested_token_ids}.''' )
_snake_case = root
def lowercase ( self : Optional[int] , _lowerCamelCase : List[str] ):
_snake_case = self.trie
for current_token in current_seq:
_snake_case = start[current_token]
_snake_case = list(start.keys() )
return next_tokens
def lowercase ( self : List[Any] , _lowerCamelCase : int ):
_snake_case = self.next_tokens(_lowerCamelCase )
return len(_lowerCamelCase ) == 0
def lowercase ( self : str , _lowerCamelCase : Optional[Any] ):
_snake_case = list(root.values() )
if len(_lowerCamelCase ) == 0:
return 1
else:
return sum([self.count_leaves(_lowerCamelCase ) for nn in next_nodes] )
def lowercase ( self : str , _lowerCamelCase : str , _lowerCamelCase : int ):
_snake_case = self.count_leaves(_lowerCamelCase )
return len(_lowerCamelCase ) != leaf_count
class lowerCAmelCase__ ( A_ ):
def __init__( self : Optional[int] , _lowerCamelCase : List[List[int]] ):
super(_lowerCamelCase , self ).__init__()
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or len(_lowerCamelCase ) == 0:
raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
if any(not isinstance(_lowerCamelCase , _lowerCamelCase ) for token_ids in nested_token_ids ):
raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
if any(
any((not isinstance(_lowerCamelCase , _lowerCamelCase ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
_snake_case = DisjunctiveTrie(_lowerCamelCase )
_snake_case = nested_token_ids
_snake_case = self.trie.max_height
_snake_case = []
_snake_case = False
def lowercase ( self : Any ):
_snake_case = self.trie.next_tokens(self.current_seq )
if len(_lowerCamelCase ) == 0:
return None
else:
return token_list
def lowercase ( self : Optional[Any] , _lowerCamelCase : int ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_lowerCamelCase )}''' )
_snake_case = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def lowercase ( self : List[str] , _lowerCamelCase : int ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_lowerCamelCase )}''' )
_snake_case = False
_snake_case = False
_snake_case = False
if self.does_advance(_lowerCamelCase ):
self.current_seq.append(_lowerCamelCase )
_snake_case = True
else:
_snake_case = True
self.reset()
_snake_case = self.trie.reached_leaf(self.current_seq )
_snake_case = completed
return stepped, completed, reset
def lowercase ( self : str ):
_snake_case = False
_snake_case = []
def lowercase ( self : Tuple ):
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def lowercase ( self : Any , _lowerCamelCase : List[str]=False ):
_snake_case = DisjunctiveConstraint(self.token_ids )
if stateful:
_snake_case = self.seqlen
_snake_case = self.current_seq
_snake_case = self.completed
return new_constraint
class lowerCAmelCase__ :
def __init__( self : Any , _lowerCamelCase : List[Constraint] ):
_snake_case = constraints
# max # of steps required to fulfill a given constraint
_snake_case = max([c.seqlen for c in constraints] )
_snake_case = len(_lowerCamelCase )
_snake_case = False
self.init_state()
def lowercase ( self : Optional[int] ):
_snake_case = []
_snake_case = None
_snake_case = [constraint.copy(stateful=_lowerCamelCase ) for constraint in self.constraints]
def lowercase ( self : Tuple ):
_snake_case = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def lowercase ( self : Optional[int] ):
_snake_case = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
_snake_case = constraint.advance()
if isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.append(_lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.extend(_lowerCamelCase )
else:
_snake_case = self.inprogress_constraint.advance()
if isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.append(_lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
token_list.extend(_lowerCamelCase )
if len(_lowerCamelCase ) == 0:
return None
else:
return token_list
def lowercase ( self : Union[str, Any] , _lowerCamelCase : Optional[List[int]] ):
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
_snake_case , _snake_case = self.add(_lowerCamelCase )
# the entire list of constraints are fulfilled
if self.completed:
break
def lowercase ( self : Union[str, Any] , _lowerCamelCase : int ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' )
_snake_case , _snake_case = False, False
if self.completed:
_snake_case = True
_snake_case = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
# current job, simply update the state.
_snake_case , _snake_case , _snake_case = self.inprogress_constraint.update(_lowerCamelCase )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_lowerCamelCase ) )
_snake_case = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
_snake_case = None
if len(self.pending_constraints ) == 0:
# we're done!
_snake_case = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any
# constraint in our list?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(_lowerCamelCase ):
_snake_case , _snake_case , _snake_case = pending_constraint.update(_lowerCamelCase )
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''' )
if complete:
self.complete_constraints.append(_lowerCamelCase )
_snake_case = None
if not complete and stepped:
_snake_case = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
_snake_case = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
_snake_case = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def lowercase ( self : List[Any] , _lowerCamelCase : Optional[int]=True ):
new_state = ConstraintListState(self.constraints )  # we never actually touch the self.constraints
# objects throughout this process, so they stay in their initialization state.
if stateful:
_snake_case = [
constraint.copy(stateful=_lowerCamelCase ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
_snake_case = self.inprogress_constraint.copy(stateful=_lowerCamelCase )
_snake_case = [constraint.copy() for constraint in self.pending_constraints]
return new_state
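A minimal sketch of how the state machine above is meant to be driven, assuming the renamed methods
correspond to transformers' usual `advance`/`does_advance`/`update`/`remaining` constraint interface
(the token ids are made up for illustration):

constraint = PhrasalConstraint([5, 9, 2])
while constraint.remaining() > 0:
    token_id = constraint.advance()  # next token the phrase requires
    stepped, completed, reset = constraint.update(token_id)
assert constraint.completed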
| 224
|
"""simple docstring"""
from __future__ import annotations
import math
class SegmentTree:
    """Max segment tree with lazy propagation for range-assignment updates (1-indexed)."""

    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign val to every position in [a, b] (inclusive)."""
        # push any pending lazy assignment down before descending
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum over positions [a, b] (inclusive)."""
        # push any pending lazy assignment down before descending
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
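# Expected output for the driver above: the three range-max queries print 7, 14 and 15;
# after assigning 111 to positions 1..3 the full-range query prints 111, and the final
# print shows the per-element view with positions 7..8 set to 235.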
| 224
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase :Union[str, Any] = logging.get_logger(__name__)
__UpperCAmelCase :int = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class a ( _a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = "yolos"
def __init__( self : Tuple , snake_case : int=768 , snake_case : Tuple=12 , snake_case : List[str]=12 , snake_case : str=3072 , snake_case : List[str]="gelu" , snake_case : Union[str, Any]=0.0 , snake_case : Optional[int]=0.0 , snake_case : Dict=0.02 , snake_case : Optional[int]=1E-12 , snake_case : Tuple=[512, 864] , snake_case : Any=16 , snake_case : Union[str, Any]=3 , snake_case : Any=True , snake_case : int=100 , snake_case : List[str]=True , snake_case : Optional[int]=False , snake_case : int=1 , snake_case : Tuple=5 , snake_case : Optional[Any]=2 , snake_case : List[Any]=5 , snake_case : Tuple=2 , snake_case : str=0.1 , **snake_case : List[str] , ) -> str:
super().__init__(**snake_case )
__UpperCAmelCase : int = hidden_size
__UpperCAmelCase : Optional[Any] = num_hidden_layers
__UpperCAmelCase : Union[str, Any] = num_attention_heads
__UpperCAmelCase : Tuple = intermediate_size
__UpperCAmelCase : Optional[int] = hidden_act
__UpperCAmelCase : int = hidden_dropout_prob
__UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__UpperCAmelCase : Tuple = initializer_range
__UpperCAmelCase : str = layer_norm_eps
__UpperCAmelCase : Any = image_size
__UpperCAmelCase : Any = patch_size
__UpperCAmelCase : List[Any] = num_channels
__UpperCAmelCase : List[str] = qkv_bias
__UpperCAmelCase : Any = num_detection_tokens
__UpperCAmelCase : List[str] = use_mid_position_embeddings
__UpperCAmelCase : Any = auxiliary_loss
# Hungarian matcher
__UpperCAmelCase : Dict = class_cost
__UpperCAmelCase : int = bbox_cost
__UpperCAmelCase : List[str] = giou_cost
# Loss coefficients
__UpperCAmelCase : Dict = bbox_loss_coefficient
__UpperCAmelCase : Optional[Any] = giou_loss_coefficient
__UpperCAmelCase : Tuple = eos_coefficient
class a ( _a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = version.parse("1.11" )
@property
def lowerCamelCase__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCamelCase__ ( self : Dict ) -> float:
return 1E-4
@property
def lowerCamelCase__ ( self : Any ) -> int:
return 12
| 709
|
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class a ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = JukeboxTokenizer
SCREAMING_SNAKE_CASE : Tuple = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
@require_torch
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]:
import torch
__UpperCAmelCase : List[str] = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' )
__UpperCAmelCase : int = tokenizer(**self.metas )['''input_ids''']
# fmt: off
__UpperCAmelCase : List[str] = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def lowerCamelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
import torch
__UpperCAmelCase : Optional[int] = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' )
__UpperCAmelCase : List[Any] = tokenizer(**self.metas )['''input_ids''']
# fmt: off
__UpperCAmelCase : List[str] = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 266
| 0
|
'''simple docstring'''
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10_000) -> int:
    """Count the Lychrel candidates below `limit` (Project Euler problem 55)."""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        current = num
        while iterations < 50:
            current = sum_reverse(current)
            iterations += 1
            if is_palindrome(current):
                break
        else:
            # no palindrome was reached within 50 iterations
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
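# Worked example: 349 -> 349 + 943 = 1292 -> 1292 + 2921 = 4213 -> 4213 + 3124 = 7337,
# a palindrome after three iterations, so 349 is not counted; 196 is the classic number
# believed never to produce one. For the default limit the published answer is 249.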
| 120
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class a ( snake_case__ ):
'''simple docstring'''
__lowerCAmelCase : Tuple = """sew"""
def __init__( self , lowerCamelCase_=3_2 , lowerCamelCase_=7_6_8 , lowerCamelCase_=1_2 , lowerCamelCase_=1_2 , lowerCamelCase_=3_0_7_2 , lowerCamelCase_=2 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=0.0 , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=0.02 , lowerCamelCase_=1e-5 , lowerCamelCase_="group" , lowerCamelCase_="gelu" , lowerCamelCase_=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowerCamelCase_=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCamelCase_=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCamelCase_=False , lowerCamelCase_=1_2_8 , lowerCamelCase_=1_6 , lowerCamelCase_=True , lowerCamelCase_=0.05 , lowerCamelCase_=1_0 , lowerCamelCase_=2 , lowerCamelCase_=0.0 , lowerCamelCase_=1_0 , lowerCamelCase_=0 , lowerCamelCase_="mean" , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_=2_5_6 , lowerCamelCase_=0 , lowerCamelCase_=1 , lowerCamelCase_=2 , **lowerCamelCase_ , ) -> Tuple:
super().__init__(**lowerCamelCase_ , pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ )
_a : Optional[int] = hidden_size
_a : int = feat_extract_norm
_a : Optional[int] = feat_extract_activation
_a : str = list(lowerCamelCase_ )
_a : Union[str, Any] = list(lowerCamelCase_ )
_a : List[Any] = list(lowerCamelCase_ )
_a : Union[str, Any] = conv_bias
_a : Optional[int] = num_conv_pos_embeddings
_a : Dict = num_conv_pos_embedding_groups
_a : str = len(self.conv_dim )
_a : Any = num_hidden_layers
_a : List[Any] = intermediate_size
_a : Tuple = squeeze_factor
_a : Tuple = hidden_act
_a : Any = num_attention_heads
_a : Optional[int] = hidden_dropout
_a : List[str] = attention_dropout
_a : Optional[Any] = activation_dropout
_a : str = feat_proj_dropout
_a : str = final_dropout
_a : str = layerdrop
_a : Optional[Any] = layer_norm_eps
_a : Optional[Any] = initializer_range
_a : Any = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. '
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '
F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride) '''
F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_a : str = apply_spec_augment
_a : List[Any] = mask_time_prob
_a : Optional[Any] = mask_time_length
_a : Union[str, Any] = mask_time_min_masks
_a : List[str] = mask_feature_prob
_a : List[str] = mask_feature_length
_a : str = mask_feature_min_masks
# ctc loss
_a : Any = ctc_loss_reduction
_a : Optional[Any] = ctc_zero_infinity
# sequence classification
_a : List[Any] = use_weighted_layer_sum
_a : Tuple = classifier_proj_size
@property
def __UpperCamelCase ( self ) -> Optional[int]:
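# total stride of the convolutional feature extractor: the product of all conv
# strides, e.g. 5 * 2**6 = 320 for the default (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)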
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 120
| 1
|
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __a :
'''simple docstring'''
def __init__( self , _a , _a=14 , _a=7 , _a=True , _a=True , _a=False , _a=True , _a=99 , _a=32 , _a=4 , _a=4 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=0.02 , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = parent
SCREAMING_SNAKE_CASE__ : List[str] = batch_size
SCREAMING_SNAKE_CASE__ : Tuple = seq_length
SCREAMING_SNAKE_CASE__ : Dict = is_training
SCREAMING_SNAKE_CASE__ : Tuple = use_input_mask
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_token_type_ids
SCREAMING_SNAKE_CASE__ : Any = use_labels
SCREAMING_SNAKE_CASE__ : int = vocab_size
SCREAMING_SNAKE_CASE__ : Tuple = hidden_size
SCREAMING_SNAKE_CASE__ : List[Any] = rotary_dim
SCREAMING_SNAKE_CASE__ : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE__ : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple = intermediate_size
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Any = initializer_range
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
SCREAMING_SNAKE_CASE__ : Dict = vocab_size - 1
SCREAMING_SNAKE_CASE__ : str = vocab_size - 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = vocab_size - 1
def _a ( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : List[Any] = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=_a , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def _a ( self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : int = config_and_inputs
SCREAMING_SNAKE_CASE__ : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def _a ( self , _a , _a , _a , _a ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = 20
SCREAMING_SNAKE_CASE__ : int = model_class_name(_a )
SCREAMING_SNAKE_CASE__ : List[Any] = model.init_cache(input_ids.shape[0] , _a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE__ : Any = model(
input_ids[:, :-1] , attention_mask=_a , past_key_values=_a , position_ids=_a , )
SCREAMING_SNAKE_CASE__ : str = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
SCREAMING_SNAKE_CASE__ : str = model(
input_ids[:, -1:] , attention_mask=_a , past_key_values=outputs_cache.past_key_values , position_ids=_a , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_a )
SCREAMING_SNAKE_CASE__ : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
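# i.e. decoding with the pre-allocated cache (all but the last token, then the
# last token) must agree with a single uncached forward pass almost exactly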
def _a ( self , _a , _a , _a , _a ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = 20
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_class_name(_a )
SCREAMING_SNAKE_CASE__ : List[Any] = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
SCREAMING_SNAKE_CASE__ : Any = model.init_cache(input_ids.shape[0] , _a )
SCREAMING_SNAKE_CASE__ : Optional[int] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
SCREAMING_SNAKE_CASE__ : Dict = model(
input_ids[:, :-1] , attention_mask=_a , past_key_values=_a , position_ids=_a , )
SCREAMING_SNAKE_CASE__ : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=_a , position_ids=_a , )
SCREAMING_SNAKE_CASE__ : Dict = model(_a , attention_mask=_a )
SCREAMING_SNAKE_CASE__ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class __a (UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :List[str] = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
_SCREAMING_SNAKE_CASE :Tuple = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = FlaxGPTJModelTester(self )
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(_a , _a , _a , _a )
def _a ( self ) -> Tuple:
"""simple docstring"""
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
_a , _a , _a , _a )
@tooslow
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=_a , truncation=_a )
SCREAMING_SNAKE_CASE__ : Tuple = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = False
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model.config.eos_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] = jax.jit(model.generate )
SCREAMING_SNAKE_CASE__ : Optional[int] = jit_generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.batch_decode(_a , skip_special_tokens=_a )
SCREAMING_SNAKE_CASE__ : List[str] = [
"""Hello this is a long string of text.\n\nI'm trying to get the text of the""",
"""Hey, I'm a little late to the party. I'm going to""",
]
self.assertListEqual(_a , _a )
@is_pt_flax_cross_test
def _a ( self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE__ : Dict = self._prepare_for_class(_a , _a )
SCREAMING_SNAKE_CASE__ : int = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE__ : int = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE__ : Union[str, Any] = getattr(_a , _a )
SCREAMING_SNAKE_CASE__ : List[Any] = pt_inputs["""input_ids"""].shape
SCREAMING_SNAKE_CASE__ : Any = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_a ):
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : Dict = 1
SCREAMING_SNAKE_CASE__ : List[str] = 0
SCREAMING_SNAKE_CASE__ : Any = 1
SCREAMING_SNAKE_CASE__ : Tuple = pt_model_class(_a ).eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_class(_a , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE__ : List[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _a )
SCREAMING_SNAKE_CASE__ : List[str] = fx_state
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = pt_model(**_a ).to_tuple()
SCREAMING_SNAKE_CASE__ : Optional[Any] = fx_model(**_a ).to_tuple()
self.assertEqual(len(_a ) , len(_a ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(_a , _a ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_a )
SCREAMING_SNAKE_CASE__ : str = model_class.from_pretrained(_a , from_pt=_a )
SCREAMING_SNAKE_CASE__ : List[str] = fx_model_loaded(**_a ).to_tuple()
self.assertEqual(
len(_a ) , len(_a ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(_a , _a ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE__ : List[Any] = self._prepare_for_class(_a , _a )
SCREAMING_SNAKE_CASE__ : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE__ : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE__ : Union[str, Any] = getattr(_a , _a )
SCREAMING_SNAKE_CASE__ : int = pt_model_class(_a ).eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_class(_a , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE__ : List[str] = load_flax_weights_in_pytorch_model(_a , fx_model.params )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pt_inputs["""input_ids"""].shape
SCREAMING_SNAKE_CASE__ : Tuple = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_a ):
SCREAMING_SNAKE_CASE__ : Dict = 0
SCREAMING_SNAKE_CASE__ : Tuple = 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
SCREAMING_SNAKE_CASE__ : int = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple = pt_model(**_a ).to_tuple()
SCREAMING_SNAKE_CASE__ : Any = fx_model(**_a ).to_tuple()
self.assertEqual(len(_a ) , len(_a ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(_a , _a ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = pt_model_class.from_pretrained(_a , from_flax=_a )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = pt_model_loaded(**_a ).to_tuple()
self.assertEqual(
len(_a ) , len(_a ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(_a , _a ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def _a ( self ) -> Tuple:
"""simple docstring"""
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[str] = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_a )
| 712
|
"""simple docstring"""
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether the prefix of
    # input_string of length i matches the prefix of pattern of length j.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]
    # a string of zero length matches a pattern of zero length
    dp[0][0] = 1
    # a pattern of zero length will never match a string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # a string of zero length matches a pattern in which every literal is
    # followed by a "*" (each such pair can match zero characters)
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # now using a bottom-up approach to fill in all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputting the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
    # using the function to check whether the given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
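    # Quick sanity check (an addition, not part of the original snippet): Python's
    # `re` module implements the same "." and "*" semantics, so full-string
    # matches should agree with the DP implementation above.
    import re

    for s, p in [("aab", "c*a*b"), ("mississippi", "mis*is*p*.")]:
        assert match_pattern(s, p) == bool(re.fullmatch(p, s))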
| 12
| 0
|
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , a : Optional[Any] , a : Dict=3 , a : Optional[int]=32 , a : Any=3 , a : Optional[Any]=10 , a : List[str]=[10, 20, 30, 40] , a : Dict=[1, 1, 2, 1] , a : Dict=True , a : int=True , a : List[Any]="relu" , a : List[str]=3 , a : List[Any]=None , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : Optional[Any] = image_size
SCREAMING_SNAKE_CASE : Optional[int] = num_channels
SCREAMING_SNAKE_CASE : str = embeddings_size
SCREAMING_SNAKE_CASE : Optional[Any] = hidden_sizes
SCREAMING_SNAKE_CASE : Optional[Any] = depths
SCREAMING_SNAKE_CASE : int = is_training
SCREAMING_SNAKE_CASE : Dict = use_labels
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : str = num_labels
SCREAMING_SNAKE_CASE : int = scope
SCREAMING_SNAKE_CASE : str = len(a )
def __UpperCamelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE : str = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def __UpperCamelCase ( self : Optional[int] , a : Optional[int] , a : Optional[int] , a : Tuple ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = TFRegNetModel(config=a )
SCREAMING_SNAKE_CASE : int = model(a , training=a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __UpperCamelCase ( self : Dict , a : Any , a : List[str] , a : Optional[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_labels
SCREAMING_SNAKE_CASE : Tuple = TFRegNetForImageClassification(a )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(a , labels=a , training=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class _UpperCamelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
lowerCamelCase__ =(
{'feature-extraction': TFRegNetModel, 'image-classification': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
def __UpperCamelCase ( self : int ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = TFRegNetModelTester(self )
SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=a , has_text_modality=a )
def __UpperCamelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def __UpperCamelCase ( self : str ) -> int:
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def __UpperCamelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Optional[Any] = model_class(a )
SCREAMING_SNAKE_CASE : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Any = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a )
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) , training=False )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
def __UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={} ):
            tuple_output = model(tuple_inputs , return_dict=False , **additional_kwargs )
            dict_output = model(dict_inputs , return_dict=True , **additional_kwargs ).to_tuple()

            def recursive_check(tuple_object , dict_object ):
                if isinstance(tuple_object , (List, Tuple) ):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object ):
                        recursive_check(tuple_iterable_value , dict_iterable_value )
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object , dict_object ) ) , msg=(
                            "Tuple and dict output are not equal. Difference:"
                            F" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"
                        ) , )

            recursive_check(tuple_output , dict_output )

        for model_class in self.all_model_classes:
            model = model_class(config )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs , {"output_hidden_states": True} )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs , {"output_hidden_states": True} )
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def __UpperCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="tf" )
        # forward pass
        outputs = model(**inputs , training=False )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836] )
        tf.debugging.assert_near(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
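# A hedged standalone-inference sketch mirroring the integration test above;
# "facebook/regnet-y-040" is an assumed checkpoint name used purely for
# illustration, not asserted by this test file.
if __name__ == "__main__":
    import tensorflow as tf
    from PIL import Image
    from transformers import AutoImageProcessor, TFRegNetForImageClassification

    processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(images=image, return_tensors="tf")
    logits = model(**inputs, training=False).logits
    print("predicted class id:", int(tf.math.argmax(logits, axis=-1)[0]))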
| 25
|
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def __lowercase (method ) -> Optional[Any]:
    """simple docstring"""
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__ ).base_version
    if version.parse(accelerate_version ) < version.parse("""0.17.0""" ):
        return method

    def wrapper(self, *args, **kwargs ):
        if hasattr(self, """_hf_hook""" ) and hasattr(self._hf_hook, """pre_forward""" ):
            self._hf_hook.pre_forward(self )
        return method(self, *args, **kwargs )

    return wrapper
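# A hedged usage sketch: the wrapper above matches diffusers'
# `apply_forward_hook` decorator. Applied to a public method of an
# accelerate-dispatched module, it fires `_hf_hook.pre_forward` (e.g. to move
# offloaded weights onto the execution device) before the method body runs.
# The class and method below are hypothetical names for illustration only:
#
#     class AutoencoderKL(ModelMixin):
#         @apply_forward_hook
#         def encode(self, x):
#             ...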
| 150
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
UpperCamelCase__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"roberta-base": 5_12,
"roberta-large": 5_12,
"roberta-large-mnli": 5_12,
"distilroberta-base": 5_12,
"roberta-base-openai-detector": 5_12,
"roberta-large-openai-detector": 5_12,
}
class _lowerCAmelCase ( PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = RobertaTokenizer

    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("""type""" ) )
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = """post_processor"""
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["""sep"""] = tuple(state["""sep"""] )
            if "cls" in state:
                state["""cls"""] = tuple(state["""cls"""] )
            changes_to_apply = False
            if state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
                state["""add_prefix_space"""] = add_prefix_space
                changes_to_apply = True
            if state.get("""trim_offsets""" , trim_offsets ) != trim_offsets:
                state["""trim_offsets"""] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("""type""" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    def mask_token( self ) -> str:
        """simple docstring"""
        if self._mask_token is None:
            if self.verbose:
                logger.error("""Using mask_token, but it is not set yet.""" )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def mask_token( self , value ) -> None:
        """simple docstring"""
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ) -> List[int]:
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
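# A hedged usage sketch: the class above mirrors transformers'
# RobertaTokenizerFast, so its special-token layout can be checked like this
# (illustrative only, assuming the "roberta-base" checkpoint):
#
#     from transformers import RobertaTokenizerFast
#
#     tok = RobertaTokenizerFast.from_pretrained("roberta-base")
#     ids = tok.build_inputs_with_special_tokens([100, 200])        # <s> A </s>
#     pair = tok.build_inputs_with_special_tokens([100], [200])     # <s> A </s></s> B </s>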
| 718
|
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class _lowerCAmelCase :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.02 , layer_norm_eps=1E-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , out_features=["stage1", "stage2", "stage3"] , out_indices=[1, 2, 3] , ) -> None:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs( self ) -> Union[str, Any]:
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ) -> int:
        """simple docstring"""
        return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model( self , config , pixel_values , labels ) -> Union[str, Any]:
        """simple docstring"""
        model = MaskFormerSwinModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_backbone( self , config , pixel_values , labels ) -> List[str]:
        """simple docstring"""
        model = MaskFormerSwinBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , [16, 32, 64] )
        # verify ValueError
        with self.parent.assertRaises(ValueError ):
            config.out_features = ["""stem"""]
            MaskFormerSwinBackbone(config=config )
    def prepare_config_and_inputs_for_common( self ) -> Any:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _lowerCAmelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def snake_case__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
        self.model_tester = MaskFormerSwinModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskFormerSwinConfig , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
return
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def snake_case__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
@unittest.skip("""Swin does not use inputs_embeds""" )
def snake_case__ ( self : int ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def snake_case__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
def snake_case__ ( self : Dict ) -> Any:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def snake_case__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
    def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ) -> Dict:
        """simple docstring"""
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) , expected_num_layers )
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def snake_case__ ( self : int ) -> Dict:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
def snake_case__ ( self : int ) -> str:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def snake_case__ ( self : Any ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def snake_case__ ( self : List[str] ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def snake_case__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
def snake_case__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t ):
            t[t != t] = 0
            return t

        def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={} ):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs , return_dict=False , **additional_kwargs )
                dict_output = model(**dict_inputs , return_dict=True , **additional_kwargs ).to_tuple()

            def recursive_check(tuple_object , dict_object ):
                if isinstance(tuple_object , (List, Tuple) ):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object ):
                        recursive_check(tuple_iterable_value , dict_iterable_value )
                elif isinstance(tuple_object , Dict ):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values() , dict_object.values() ):
                        recursive_check(tuple_iterable_value , dict_iterable_value )
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object ) , set_nan_tensor_to_zero(dict_object ) , atol=1E-5 ) , msg=(
                            """Tuple and dict output are not equal. Difference:"""
                            F" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
                            F" {torch.isnan(tuple_object ).any()} and `inf`: {torch.isinf(tuple_object ).any()}. Dict has"
                            F" `nan`: {torch.isnan(dict_object ).any()} and `inf`: {torch.isinf(dict_object ).any()}."
                        ) , )

            recursive_check(tuple_output , dict_object if False else dict_output )

        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs , {"""output_hidden_states""": True} )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs , {"""output_hidden_states""": True} )
@require_torch
class _lowerCAmelCase ( unittest.TestCase , BackboneTesterMixin ):
"""simple docstring"""
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig
def snake_case__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
        self.model_tester = MaskFormerSwinModelTester(self )
def snake_case__ ( self : Any ) -> Tuple:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["""pixel_values"""].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config )
            backbone.to(torch_device )
            backbone.eval()
            outputs = backbone(**inputs_dict )
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps , tuple )
            self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
            for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
            self.assertIsNone(outputs.hidden_states )
            self.assertIsNone(outputs.attentions )
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict , output_hidden_states=True )
            self.assertIsNotNone(outputs.hidden_states )
            self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict , output_attentions=True )
                self.assertIsNotNone(outputs.attentions )
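# A hedged sketch of driving the backbone outside the test harness, using the
# default MaskFormerSwinConfig; the printed shapes depend on config defaults
# and are illustrative rather than asserted anywhere above.
if __name__ == "__main__":
    config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3"])
    backbone = MaskFormerSwinBackbone(config).eval()
    with torch.no_grad():
        outputs = backbone(torch.randn(1, 3, 224, 224))
    for name, feature_map in zip(backbone.out_features, outputs.feature_maps):
        print(name, tuple(feature_map.shape))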
| 634
| 0
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
UpperCAmelCase_ : Optional[int] = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, UpperCAmelCase_)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser )-> None:
    '''simple docstring'''
    from diffusers.utils.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary(terminalreporter )-> None:
    '''simple docstring'''
    from diffusers.utils.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
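# Hedged usage note (illustrative, not part of the original conftest): with the
# hooks above registered, a reports-enabled run would look like
#
#     python -m pytest --make-reports=tests_torch tests/ -k "not slow"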
| 24
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_flip_channel_order=True , ) -> None:
        '''simple docstring'''
        size = size if size is not None else {'''shortest_edge''': 20}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict( self ) -> Union[str, Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None
    def setUp( self ) -> None:
        '''simple docstring'''
        self.image_processor_tester = MobileViTImageProcessingTester(self )
    @property
    def image_processor_dict( self ) -> Optional[int]:
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ) -> Union[str, Any]:
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
        self.assertTrue(hasattr(image_processing , '''do_center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''do_flip_channel_order''' ) )
    def test_image_processor_from_dict_with_kwargs( self ) -> Optional[int]:
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
    def test_batch_feature( self ) -> Optional[Any]:
        '''simple docstring'''
        pass
    def test_call_pil( self ) -> str:
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
    def test_call_numpy( self ) -> Tuple:
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
    def test_call_pytorch( self ) -> Tuple:
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
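# A hedged end-to-end sketch of the image processor under test, using the same
# resize/crop settings as the tester defaults above (illustrative only; the
# expected output shape follows from the 18x18 center crop).
if __name__ == "__main__":
    from transformers import MobileViTImageProcessor

    processor = MobileViTImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
    image = np.random.randint(0, 256, (3, 30, 40), dtype=np.uint8)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values
    print(pixel_values.shape)  # expected: torch.Size([1, 3, 18, 18])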
| 24
| 1
|
"""simple docstring"""
from numpy import exp, pi, sqrt
def lowercase (x ,mu = 0.0 ,sigma = 1.0 ) -> float:
    '''simple docstring'''
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
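# A quick worked check of the density above (a sketch, not part of the
# original module): at x = mu the Gaussian peaks at 1 / sqrt(2 * pi * sigma^2).
if __name__ == "__main__":
    peak = lowercase(0.0, mu=0.0, sigma=1.0)
    print(round(float(peak), 6))  # ~0.398942, i.e. 1 / sqrt(2 * pi)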
| 228
|
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class __UpperCAmelCase ( SequenceFeatureExtractor ):
    """simple docstring"""
    model_input_names = ['audio_values', 'audio_mask']
    def __init__( self , spectrogram_length=2048 , num_channels=1 , patch_size=[16, 16] , feature_size=128 , sampling_rate=44100 , hop_length_to_sampling_rate=86 , n_fft=2048 , padding_value=0.0 , **kwargs , )-> None:
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs , )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=sampling_rate , norm="slaney" , mel_scale="slaney" , ).T
    def _np_extract_fbank_features( self , waveform: np.array )-> np.ndarray:
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=80.0 , )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
        return log_spec
    def __call__( self , raw_speech , return_tensors = None , return_attention_mask = True , sampling_rate = None , resample = False , mask_audio = False , **kwargs , )-> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
                    f""" with {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , list ):
            audio_features = [np.asarray(feature , dtype=np.float32 ) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask ).astype(np.float32 )
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features ) ):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}
        encoded_inputs = BatchFeature(data=data , tensor_type=return_tensors )
        return encoded_inputs
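# A hedged usage sketch for the extractor above (it corresponds to the TVLT
# audio feature extractor in transformers); the one-second 440 Hz tone is
# purely illustrative input.
if __name__ == "__main__":
    sr = 44100
    t = np.linspace(0.0, 1.0, sr, dtype=np.float32)
    waveform = 0.1 * np.sin(2.0 * np.pi * 440.0 * t)
    extractor = __UpperCAmelCase(sampling_rate=sr)
    batch = extractor(waveform, sampling_rate=sr, return_tensors="np")
    print(batch["audio_values"].shape, batch["audio_mask"].shape)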
| 228
| 1
|
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
lowerCAmelCase_ = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
lowerCAmelCase_ = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
lowerCAmelCase_ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
lowerCAmelCase_ = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
lowerCAmelCase_ = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
lowerCAmelCase_ = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
lowerCAmelCase_ = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
lowerCAmelCase_ = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
lowerCAmelCase_ = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
lowerCAmelCase_ = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
lowerCAmelCase_ = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
lowerCAmelCase_ = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
lowerCAmelCase_ = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
lowerCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
lowerCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
lowerCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
lowerCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
lowerCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
lowerCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
lowerCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
lowerCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
lowerCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
lowerCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
lowerCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
lowerCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
lowerCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
lowerCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class UpperCAmelCase_ ( _BaseAutoModelClass ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = FLAX_MODEL_MAPPING
lowerCAmelCase_ = auto_class_update(FlaxAutoModel)
class UpperCAmelCase_ ( _BaseAutoModelClass ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_PRETRAINING_MAPPING
lowerCAmelCase_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class UpperCAmelCase_ ( _BaseAutoModelClass ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
lowerCAmelCase_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class UpperCAmelCase_ ( _BaseAutoModelClass ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_MASKED_LM_MAPPING
lowerCAmelCase_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class UpperCAmelCase_ ( _BaseAutoModelClass ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCAmelCase_ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class UpperCAmelCase_ ( _BaseAutoModelClass ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCAmelCase_ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class UpperCAmelCase_ ( _BaseAutoModelClass ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
lowerCAmelCase_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class UpperCAmelCase_ ( _BaseAutoModelClass ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
lowerCAmelCase_ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class UpperCAmelCase_ ( _BaseAutoModelClass ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
lowerCAmelCase_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class UpperCAmelCase_ ( _BaseAutoModelClass ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
lowerCAmelCase_ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class UpperCAmelCase_ ( _BaseAutoModelClass ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowerCAmelCase_ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class UpperCAmelCase_ ( _BaseAutoModelClass ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
lowerCAmelCase_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class UpperCAmelCase_ ( _BaseAutoModelClass ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
lowerCAmelCase_ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
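# A hedged usage sketch for the auto classes defined above; the obfuscated
# assignments in this file correspond to transformers' public FlaxAuto* names,
# and "distilbert-base-uncased" is an illustrative checkpoint:
#
#     from transformers import AutoTokenizer, FlaxAutoModel
#
#     tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
#     model = FlaxAutoModel.from_pretrained("distilbert-base-uncased")
#     outputs = model(**tokenizer("hello", return_tensors="np"))
#     print(outputs.last_hidden_state.shape)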
| 173
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
    def _get_uniform_logits( self ,batch_size ,length ):
        scores = jnp.ones((batch_size, length) ) / length
        return scores
def UpperCamelCase_ ( self ):
lowerCamelCase__ = None
lowerCamelCase__ = 20
lowerCamelCase__ = self._get_uniform_logits(batch_size=2 ,length=_lowerCAmelCase )
# tweak scores to not be uniform anymore
lowerCamelCase__ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase__ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase__ = jax.nn.softmax(_lowerCAmelCase ,axis=-1 )
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_sharper(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 )
lowerCamelCase__ = jax.nn.softmax(temp_dist_warper_smoother(_lowerCAmelCase ,scores.copy() ,cur_len=_lowerCAmelCase ) ,axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = None
lowerCamelCase__ = 10
lowerCamelCase__ = 2
# create ramp distribution
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy()
lowerCamelCase__ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase__ = 5
lowerCamelCase__ = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 )
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, length) ).copy()
lowerCamelCase__ = top_k_warp_safety_check(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = None
lowerCamelCase__ = 10
lowerCamelCase__ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase__ = np.exp(top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase__ = np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] ,(batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase__ = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
lowerCamelCase__ = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 )
lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 20
lowerCamelCase__ = 4
lowerCamelCase__ = 0
lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase )
# check that min length is applied at length 5
lowerCamelCase__ = ids_tensor((batch_size, 20) ,vocab_size=20 )
lowerCamelCase__ = 5
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = 15
lowerCamelCase__ = min_dist_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 20
lowerCamelCase__ = 4
lowerCamelCase__ = 0
lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase__ = ids_tensor((batch_size, 1) ,vocab_size=20 )
lowerCamelCase__ = 1
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase__ = 3
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 20
lowerCamelCase__ = 4
lowerCamelCase__ = 0
lowerCamelCase__ = 5
lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase__ = ids_tensor((batch_size, 4) ,vocab_size=20 )
lowerCamelCase__ = 4
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() ,4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase__ = 3
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = logits_processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 4
lowerCamelCase__ = 10
lowerCamelCase__ = 15
lowerCamelCase__ = 2
lowerCamelCase__ = 1
lowerCamelCase__ = 15
# dummy input_ids and scores
lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase )
lowerCamelCase__ = input_ids.copy()
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = scores.copy()
# instantiate all dist processors
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = 10
# no processor list
lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# with processor list
lowerCamelCase__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = 4
lowerCamelCase__ = 10
lowerCamelCase__ = 15
lowerCamelCase__ = 2
lowerCamelCase__ = 1
lowerCamelCase__ = 15
# dummy input_ids and scores
lowerCamelCase__ = ids_tensor((batch_size, sequence_length) ,_lowerCAmelCase )
lowerCamelCase__ = input_ids.copy()
lowerCamelCase__ = self._get_uniform_logits(_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = scores.copy()
# instantiate all dist processors
lowerCamelCase__ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase__ = FlaxTopKLogitsWarper(3 )
lowerCamelCase__ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase__ = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
lowerCamelCase__ = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase ,eos_token_id=_lowerCAmelCase )
lowerCamelCase__ = 10
# no processor list
def run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = temp_dist_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_k_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = top_p_warp(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = min_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = bos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
lowerCamelCase__ = eos_dist_proc(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
return scores
# with processor list
def run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ):
lowerCamelCase__ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase__ = processor(_lowerCAmelCase ,_lowerCAmelCase ,cur_len=_lowerCAmelCase )
return scores
lowerCamelCase__ = jax.jit(_lowerCAmelCase )
lowerCamelCase__ = jax.jit(_lowerCAmelCase )
lowerCamelCase__ = jitted_run_no_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
lowerCamelCase__ = jitted_run_processor_list(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase ,_lowerCAmelCase ,atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
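# A minimal standalone sketch (not part of the tests above) of how these Flax
# warpers compose in practice; it assumes `transformers` with its Flax extras
# and `jax` are installed.
import jax.numpy as jnp
from transformers import FlaxLogitsProcessorList, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper

processor_list = FlaxLogitsProcessorList(
    [FlaxTemperatureLogitsWarper(temperature=0.5), FlaxTopKLogitsWarper(top_k=3)]
)
input_ids = jnp.zeros((1, 4), dtype=jnp.int32)
scores = jnp.arange(10.0)[None, :]  # one batch, vocab of 10, ramp-shaped logits
warped = processor_list(input_ids, scores, cur_len=4)
print(int(jnp.isinf(warped).sum()))  # 7 -- everything but the top 3 tokens is masked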
| 50
| 0
|
"""simple docstring"""
from math import isqrt
def is_prime(number: int) -> bool:
    """Return True if ``number`` has no divisor in ``[2, isqrt(number)]``."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below ``max_prime`` that are differences of consecutive cubes,
    i.e. of the form (n + 1)**3 - n**3 = 3*n*n + 3*n + 1."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # 2**3 - 1**3, the first consecutive-cube difference
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index  # step to the next cube difference
    return primes_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 614
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_SCREAMING_SNAKE_CASE = {
"""configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["""MobileViTFeatureExtractor"""]
_SCREAMING_SNAKE_CASE = ["""MobileViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileViTForImageClassification""",
"""MobileViTForSemanticSegmentation""",
"""MobileViTModel""",
"""MobileViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileViTForImageClassification""",
"""TFMobileViTForSemanticSegmentation""",
"""TFMobileViTModel""",
"""TFMobileViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 614
| 1
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
_lowercase = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[float] = field(
default=0.0 , metadata={'''help''': '''The label smoothing epsilon to apply (if not zero).'''} )
    _lowerCamelCase: bool = field(default=_lowercase , metadata={'''help''': '''Whether to use SortishSampler or not.'''} )
_lowerCamelCase: bool = field(
default=_lowercase , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
    _lowerCamelCase: bool = field(default=_lowercase , metadata={'''help''': '''Whether to use Adafactor.'''} )
_lowerCamelCase: Optional[float] = field(
default=_lowercase , metadata={'''help''': '''Encoder layer dropout probability. Goes into model.config.'''} )
_lowerCamelCase: Optional[float] = field(
default=_lowercase , metadata={'''help''': '''Decoder layer dropout probability. Goes into model.config.'''} )
_lowerCamelCase: Optional[float] = field(default=_lowercase , metadata={'''help''': '''Dropout probability. Goes into model.config.'''} )
_lowerCamelCase: Optional[float] = field(
default=_lowercase , metadata={'''help''': '''Attention dropout probability. Goes into model.config.'''} )
_lowerCamelCase: Optional[str] = field(
default='''linear''' , metadata={'''help''': F'Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'} , )
| 91
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ : List[Any] = logging.get_logger(__name__)
UpperCamelCase_ : Tuple = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class _a ( __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : List[str] = """rwkv"""
SCREAMING_SNAKE_CASE_ : Any = {"""max_position_embeddings""": """context_length"""}
def __init__( self ,_SCREAMING_SNAKE_CASE=50_277 ,_SCREAMING_SNAKE_CASE=1_024 ,_SCREAMING_SNAKE_CASE=4_096 ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=6 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=True ,**_SCREAMING_SNAKE_CASE ,) -> List[str]:
_snake_case = vocab_size
_snake_case = context_length
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = attention_hidden_size if attention_hidden_size is not None else hidden_size
_snake_case = intermediate_size if intermediate_size is not None else 4 * hidden_size
_snake_case = layer_norm_epsilon
_snake_case = rescale_every
_snake_case = use_cache
_snake_case = bos_token_id
_snake_case = eos_token_id
super().__init__(
tie_word_embeddings=_SCREAMING_SNAKE_CASE ,bos_token_id=_SCREAMING_SNAKE_CASE ,eos_token_id=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
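# A short usage sketch; the class above corresponds to RwkvConfig in
# transformers (the name `_a` is a renaming artifact), so the derived
# defaults can be checked against the real library class like this:
from transformers import RwkvConfig

config = RwkvConfig(hidden_size=512, num_hidden_layers=12)
print(config.attention_hidden_size)  # 512  -- falls back to hidden_size
print(config.intermediate_size)      # 2048 -- defaults to 4 * hidden_size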
| 185
| 0
|
import os
import numpy
import onnx
def __UpperCamelCase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple ):
__a : Optional[Any] = a.name
__a : int = b.name
__a : Dict = ''''''
__a : List[str] = ''''''
__a : Optional[Any] = a == b
__a : Tuple = name_a
__a : Any = name_b
return res
def __UpperCamelCase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int ):
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(a__ , a__ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , a__ , a__ )
_graph_replace_input_with(node_proto.attribute[1].g , a__ , a__ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , a__ , a__ )
def __UpperCamelCase ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[int] ):
for n in graph_proto.node:
_node_replace_input_with(a__ , a__ , a__ )
def __UpperCamelCase ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[Any] ):
__a : int = list(model.graph.initializer )
__a : Dict = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
__a : Dict = inits[i].name
__a : int = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , a__ , a__ )
def __UpperCamelCase ( lowerCAmelCase__ : Dict ):
__a : Optional[int] = os.path.dirname(a__ )
__a : str = os.path.basename(a__ )
__a : str = onnx.load(os.path.join(a__ , a__ ) )
__a : str = list(model.graph.initializer )
__a : int = set()
__a : Union[str, Any] = {}
__a : Optional[Any] = []
__a : Union[str, Any] = 0
for i in range(len(a__ ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(a__ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(a__ )
dup_set.add(a__ )
__a : Dict = inits[j].data_type
__a : int = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 1_1:
mem_size *= 8
else:
print('''unexpected data type: ''' , a__ )
total_reduced_size += mem_size
__a : Optional[int] = inits[i].name
__a : Optional[Any] = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(a__ )
else:
__a : Optional[int] = [name_j]
ind_to_replace.append((j, i) )
print('''total reduced size: ''' , total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4 , '''GB''' )
__a : Union[str, Any] = sorted(a__ )
_remove_dup_initializers_from_model(a__ , a__ , a__ )
__a : Tuple = '''optimized_''' + model_file_name
__a : Union[str, Any] = os.path.join(a__ , a__ )
onnx.save(a__ , a__ )
return new_model
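# Standalone sketch of the name-insensitive tensor comparison the first helper
# performs (the functions above all lost their names to renaming, so this
# inlines the idea rather than calling them):
import numpy as np
from onnx import numpy_helper

t_a = numpy_helper.from_array(np.ones(3, dtype=np.float32), name="w_a")
t_b = numpy_helper.from_array(np.ones(3, dtype=np.float32), name="w_b")
name_a, name_b = t_a.name, t_b.name
t_a.name = t_b.name = ""
print(t_a == t_b)  # True -- identical payloads once the names are blanked out
t_a.name, t_b.name = name_a, name_b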
| 714
|
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Modified Euler (Heun's) method for the IVP y' = ode_func(x, y), y(x0) = y0."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])  # explicit Euler predictor
        y[k + 1] = y[k] + (step_size / 2) * (
            ode_func(x, y[k]) + ode_func(x + step_size, y_predict)
        )  # trapezoidal corrector
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
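# Quick sketch exercising the solver on y' = y, y(0) = 1 over [0, 1]; the exact
# value is e ~= 2.71828 and Heun's second-order method with h = 0.1 lands close:
print(euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)[-1])  # ~= 2.7141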
| 326
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase_ = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 209
|
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
UpperCamelCase_ = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
UpperCamelCase_ = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against one or more human-produced reference summaries or translations.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metric is a wrapper around the Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
UpperCamelCase_ = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE( datasets.Metric ):
def __lowerCamelCase ( self : Any ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
def __lowerCamelCase ( self : Optional[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Tuple=True , UpperCamelCase_ : str=False ) -> Optional[Any]:
if rouge_types is None:
SCREAMING_SNAKE_CASE__ :Optional[int] = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
SCREAMING_SNAKE_CASE__ :Union[str, Any] = rouge_scorer.RougeScorer(rouge_types=UpperCamelCase_ , use_stemmer=UpperCamelCase_ )
if use_aggregator:
SCREAMING_SNAKE_CASE__ :Tuple = scoring.BootstrapAggregator()
else:
SCREAMING_SNAKE_CASE__ :Any = []
for ref, pred in zip(UpperCamelCase_ , UpperCamelCase_ ):
SCREAMING_SNAKE_CASE__ :Tuple = scorer.score(UpperCamelCase_ , UpperCamelCase_ )
if use_aggregator:
aggregator.add_scores(UpperCamelCase_ )
else:
scores.append(UpperCamelCase_ )
if use_aggregator:
SCREAMING_SNAKE_CASE__ :Tuple = aggregator.aggregate()
else:
SCREAMING_SNAKE_CASE__ :List[str] = {}
for key in scores[0]:
SCREAMING_SNAKE_CASE__ :int = [score[key] for score in scores]
return result
| 209
| 1
|
def multiplicative_persistence(num: int) -> int:
    """Return how many times the digits of ``num`` must be multiplied together
    before a single digit remains.

    >>> multiplicative_persistence(217)
    2
    """
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(digit) for digit in num_string]
        total = 1
        for number in numbers:
            total *= number
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return how many times the digits of ``num`` must be summed before a
    single digit remains.

    >>> additive_persistence(199)
    3
    """
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(digit) for digit in num_string]
        total = 0
        for number in numbers:
            total += number
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
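# Usage sketch; 277777788888899 is reported to be the smallest number needing
# 11 multiplication steps, the current record for multiplicative persistence:
print(multiplicative_persistence(277777788888899))  # 11
print(additive_persistence(199))  # 199 -> 19 -> 10 -> 1, so 3 steps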
| 719
|
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowerCAmelCase__ ( a__ , a__ , a__ ) ->int:
'''simple docstring'''
_UpperCamelCase = 1.5
_UpperCamelCase = int(factor * num_class_images )
_UpperCamelCase = ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=a__ , aesthetic_weight=0.1 )
os.makedirs(f'{class_data_dir}/images' , exist_ok=a__ )
if len(list(Path(f'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
return
while True:
_UpperCamelCase = client.query(text=a__ )
if len(a__ ) >= factor * num_class_images or num_images > 1e4:
break
else:
_UpperCamelCase = int(factor * num_images )
_UpperCamelCase = ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=a__ , aesthetic_weight=0.1 , )
_UpperCamelCase = 0
_UpperCamelCase = 0
_UpperCamelCase = tqdm(desc="downloading real regularization images" , total=a__ )
with open(f'{class_data_dir}/caption.txt' , "w" ) as fa, open(f'{class_data_dir}/urls.txt' , "w" ) as fa, open(
f'{class_data_dir}/images.txt' , "w" ) as fa:
while total < num_class_images:
_UpperCamelCase = class_images[count]
count += 1
try:
_UpperCamelCase = requests.get(images["url"] )
if img.status_code == 200:
_UpperCamelCase = Image.open(BytesIO(img.content ) )
with open(f'{class_data_dir}/images/{total}.jpg' , "wb" ) as f:
f.write(img.content )
fa.write(images["caption"] + "\n" )
fa.write(images["url"] + "\n" )
fa.write(f'{class_data_dir}/images/{total}.jpg' + "\n" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def lowerCAmelCase__ ( ) ->Optional[Any]:
'''simple docstring'''
_UpperCamelCase = argparse.ArgumentParser("" , add_help=a__ )
parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=a__ , type=a__ )
parser.add_argument("--class_data_dir" , help="path to save images" , required=a__ , type=a__ )
parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=a__ )
return parser.parse_args()
if __name__ == "__main__":
lowerCamelCase__ = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 82
| 0
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_a : List[Any] = logging.get_logger(__name__)
_a : Any = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
_a : Tuple = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def snake_case__ ( UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : str ):
for attribute in key.split("." ):
lowerCAmelCase__ :List[Any] = getattr(__snake_case , __snake_case )
if weight_type is not None:
lowerCAmelCase__ :Optional[int] = getattr(__snake_case , __snake_case ).shape
else:
lowerCAmelCase__ :Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
lowerCAmelCase__ :str = value
elif weight_type == "weight_g":
lowerCAmelCase__ :Dict = value
elif weight_type == "weight_v":
lowerCAmelCase__ :Optional[int] = value
elif weight_type == "bias":
lowerCAmelCase__ :List[Any] = value
else:
lowerCAmelCase__ :Optional[Any] = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def snake_case__ ( UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] ):
lowerCAmelCase__ :Union[str, Any] = []
lowerCAmelCase__ :Optional[int] = fairseq_model.state_dict()
lowerCAmelCase__ :List[str] = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
lowerCAmelCase__ :Any = False
if "conv_layers" in name:
load_conv_layer(
__snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == "group" , )
lowerCAmelCase__ :Dict = True
else:
for key, mapped_key in MAPPING.items():
lowerCAmelCase__ :Union[str, Any] = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
lowerCAmelCase__ :str = True
if "*" in mapped_key:
lowerCAmelCase__ :List[Any] = name.split(__snake_case )[0].split("." )[-2]
lowerCAmelCase__ :Optional[int] = mapped_key.replace("*" , __snake_case )
if "weight_g" in name:
lowerCAmelCase__ :Dict = "weight_g"
elif "weight_v" in name:
lowerCAmelCase__ :Dict = "weight_v"
elif "bias" in name:
lowerCAmelCase__ :str = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCAmelCase__ :Dict = "weight"
else:
lowerCAmelCase__ :Optional[int] = None
set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(F'''Unused weights: {unused_weights}''' )
def snake_case__ ( UpperCAmelCase : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : str ):
lowerCAmelCase__ :List[Any] = full_name.split("conv_layers." )[-1]
lowerCAmelCase__ :List[str] = name.split("." )
lowerCAmelCase__ :Optional[int] = int(items[0] )
lowerCAmelCase__ :Union[str, Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
lowerCAmelCase__ :Any = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
lowerCAmelCase__ :int = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
lowerCAmelCase__ :Optional[Any] = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
lowerCAmelCase__ :List[Any] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__snake_case )
@torch.no_grad()
def snake_case__ ( UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Optional[int]=True ):
if config_path is not None:
lowerCAmelCase__ :Optional[int] = UniSpeechSatConfig.from_pretrained(__snake_case )
else:
lowerCAmelCase__ :Any = UniSpeechSatConfig()
lowerCAmelCase__ :List[Any] = ""
if is_finetuned:
lowerCAmelCase__ :Any = UniSpeechSatForCTC(__snake_case )
else:
lowerCAmelCase__ :Optional[int] = UniSpeechSatForPreTraining(__snake_case )
lowerCAmelCase__ :Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
lowerCAmelCase__ :List[str] = model[0].eval()
recursively_load_weights(__snake_case , __snake_case )
hf_wavavec.save_pretrained(__snake_case )
if __name__ == "__main__":
_a : Optional[int] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
_a : Any = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 145
|
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
__snake_case : Optional[Any] = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
__snake_case : List[Any] = parser.parse_args()
if args.model_type == "bert":
__snake_case : int = BertForMaskedLM.from_pretrained(args.model_name)
__snake_case : Tuple = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
__snake_case : List[Any] = model.state_dict()
__snake_case : Any = {}
for w in ["word_embeddings", "position_embeddings"]:
__snake_case : List[str] = state_dict[F"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
__snake_case : Optional[int] = state_dict[F"""{prefix}.embeddings.LayerNorm.{w}"""]
__snake_case : List[str] = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
__snake_case : str = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
__snake_case : Tuple = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
__snake_case : str = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
__snake_case : Any = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
__snake_case : Union[str, Any] = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
__snake_case : Union[str, Any] = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
__snake_case : Any = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
__snake_case : Optional[int] = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
__snake_case : Union[str, Any] = state_dict['cls.predictions.decoder.weight']
__snake_case : Union[str, Any] = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
__snake_case : Union[str, Any] = state_dict[F"""cls.predictions.transform.dense.{w}"""]
__snake_case : List[str] = state_dict[F"""cls.predictions.transform.LayerNorm.{w}"""]
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 293
| 0
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class __lowerCAmelCase ( _UpperCamelCase):
'''simple docstring'''
__magic_name__ : Tuple = """WhisperFeatureExtractor"""
__magic_name__ : List[Any] = """WhisperTokenizer"""
def __init__( self : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] ):
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
A__ : Union[str, Any] =self.feature_extractor
A__ : Union[str, Any] =False
def _UpperCAmelCase ( self : str , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : List[str]=True ):
return self.tokenizer.get_decoder_prompt_ids(task=UpperCamelCase__ , language=UpperCamelCase__ , no_timestamps=UpperCamelCase__ )
def __call__( self : Any , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Tuple ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*UpperCamelCase__ , **UpperCamelCase__ )
A__ : Tuple =kwargs.pop("audio" , UpperCamelCase__ )
A__ : Optional[Any] =kwargs.pop("sampling_rate" , UpperCamelCase__ )
A__ : List[str] =kwargs.pop("text" , UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
A__ : str =args[0]
A__ : List[Any] =args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
A__ : List[Any] =self.feature_extractor(UpperCamelCase__ , *UpperCamelCase__ , sampling_rate=UpperCamelCase__ , **UpperCamelCase__ )
if text is not None:
A__ : str =self.tokenizer(UpperCamelCase__ , **UpperCamelCase__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
A__ : List[Any] =encodings["input_ids"]
return inputs
def _UpperCAmelCase ( self : Dict , *UpperCamelCase__ : str , **UpperCamelCase__ : Optional[Any] ):
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def _UpperCAmelCase ( self : Tuple , *UpperCamelCase__ : str , **UpperCamelCase__ : Optional[Any] ):
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
def _UpperCAmelCase ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any]="np" ):
return self.tokenizer.get_prompt_ids(UpperCamelCase__ , return_tensors=UpperCamelCase__ )
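# A short usage sketch (the class above is transformers' WhisperProcessor; this
# assumes network access to the public "openai/whisper-tiny" checkpoint):
import numpy as np
from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
inputs = processor(audio=audio, sampling_rate=16_000, return_tensors="np")
print(inputs.input_features.shape)  # (1, 80, 3000): padded log-mel features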
| 595
|
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__A : Optional[int] = logging.get_logger(__name__)
__A : Union[str, Any] = {"vocab_file": "vocab.txt"}
__A : Dict = {
"vocab_file": {
"openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
},
}
__A : List[Any] = {
"openbmb/cpm-ant-10b": 1_024,
}
def load_vocab(vocab_file: str) -> collections.OrderedDict:
    """Load a vocabulary file into an ordered token -> index mapping."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class __lowerCAmelCase ( _UpperCamelCase):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any]="<unk>" , UpperCamelCase__ : int=200 ):
A__ : List[Any] =vocab
A__ : List[str] =unk_token
A__ : Union[str, Any] =max_input_chars_per_word
def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : int ):
A__ : Union[str, Any] =list(UpperCamelCase__ )
if len(UpperCamelCase__ ) > self.max_input_chars_per_word:
return [self.unk_token]
A__ : List[str] =0
A__ : List[str] =[]
while start < len(UpperCamelCase__ ):
A__ : int =len(UpperCamelCase__ )
A__ : Optional[int] =None
while start < end:
A__ : Optional[int] ="".join(chars[start:end] )
if substr in self.vocab:
A__ : List[str] =substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(UpperCamelCase__ )
A__ : Dict =end
return sub_tokens
class __lowerCAmelCase ( _UpperCamelCase):
'''simple docstring'''
__magic_name__ : int = VOCAB_FILES_NAMES
__magic_name__ : Dict = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : Optional[int] = ["""input_ids""", """attention_mask"""]
__magic_name__ : List[Any] = False
def __init__( self : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str="<d>" , UpperCamelCase__ : Optional[int]="</d>" , UpperCamelCase__ : Optional[Any]="<s>" , UpperCamelCase__ : Tuple="</s>" , UpperCamelCase__ : List[Any]="<pad>" , UpperCamelCase__ : Tuple="<unk>" , UpperCamelCase__ : Optional[Any]="</n>" , UpperCamelCase__ : Dict="</_>" , UpperCamelCase__ : Optional[int]="left" , **UpperCamelCase__ : Tuple , ):
requires_backends(self , ["jieba"] )
super().__init__(
bod_token=UpperCamelCase__ , eod_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , line_token=UpperCamelCase__ , space_token=UpperCamelCase__ , padding_side=UpperCamelCase__ , **UpperCamelCase__ , )
A__ : Union[str, Any] =bod_token
A__ : str =eod_token
A__ : Any =load_vocab(UpperCamelCase__ )
A__ : Dict =self.encoder[space_token]
A__ : Any =self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
A__ : Any =collections.OrderedDict(sorted(self.encoder.items() , key=lambda UpperCamelCase__ : x[1] ) )
A__ : int ={v: k for k, v in self.encoder.items()}
A__ : Union[str, Any] =WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def _UpperCAmelCase ( self : Union[str, Any] ):
return self.encoder[self.bod_token]
@property
def _UpperCAmelCase ( self : Optional[Any] ):
return self.encoder[self.eod_token]
@property
def _UpperCAmelCase ( self : List[Any] ):
return self.encoder["\n"]
@property
def _UpperCAmelCase ( self : Tuple ):
return len(self.encoder )
def _UpperCAmelCase ( self : str ):
return dict(self.encoder , **self.added_tokens_encoder )
def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : List[str] ):
A__ : Optional[Any] =[]
for x in jieba.cut(UpperCamelCase__ , cut_all=UpperCamelCase__ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(UpperCamelCase__ ) )
return output_tokens
def _UpperCAmelCase ( self : str , UpperCamelCase__ : List[Any] , **UpperCamelCase__ : List[str] ):
A__ : int =[i for i in token_ids if i >= 0]
A__ : List[Any] =[
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(UpperCamelCase__ , **UpperCamelCase__ )
def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : Optional[Any] ):
return token in self.encoder
def _UpperCAmelCase ( self : Any , UpperCamelCase__ : List[str] ):
return "".join(UpperCamelCase__ )
def _UpperCAmelCase ( self : Any , UpperCamelCase__ : Any ):
return self.encoder.get(UpperCamelCase__ , self.encoder.get(self.unk_token ) )
def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : Dict ):
return self.decoder.get(UpperCamelCase__ , self.unk_token )
def _UpperCAmelCase ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
if os.path.isdir(UpperCamelCase__ ):
A__ : List[str] =os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
A__ : Dict =(filename_prefix + "-" if filename_prefix else "") + save_directory
A__ : Any =0
if " " in self.encoder:
A__ : Any =self.encoder[" "]
del self.encoder[" "]
if "\n" in self.encoder:
A__ : Any =self.encoder["\n"]
del self.encoder["\n"]
A__ : Tuple =collections.OrderedDict(sorted(self.encoder.items() , key=lambda UpperCamelCase__ : x[1] ) )
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
" Please check that the vocabulary is not corrupted!" )
A__ : Dict =token_index
writer.write(token + "\n" )
index += 1
return (vocab_file,)
def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : List[int] , UpperCamelCase__ : List[int] = None ):
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def _UpperCAmelCase ( self : Optional[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is not None:
return [1] + ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ ))
return [1] + ([0] * len(UpperCamelCase__ ))
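# Self-contained sketch of the greedy longest-match loop the WordpieceTokenizer
# above implements (all names here are illustrative, not from the file):
def longest_match_tokenize(text, vocab, unk="<unk>"):
    tokens, start = [], 0
    while start < len(text):
        end = len(text)
        while end > start and text[start:end] not in vocab:
            end -= 1  # shrink the window until a vocab entry matches
        if end == start:  # nothing matched: emit <unk> and advance one character
            tokens.append(unk)
            start += 1
        else:
            tokens.append(text[start:end])
            start = end
    return tokens

print(longest_match_tokenize("unhappy", {"un", "happy", "h"}))  # ['un', 'happy']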
| 595
| 1
|
import re
import subprocess
import sys
__lowerCamelCase : int = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
__lowerCamelCase : List[str] = subprocess.check_output(F"git diff --name-only {fork_point_sha}".split()).decode("""utf-8""").split()
__lowerCamelCase : Dict = """|""".join(sys.argv[1:])
__lowerCamelCase : List[Any] = re.compile(rF"^({joined_dirs}).*?\.py$")
__lowerCamelCase : List[str] = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 385
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
_A = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["""memory_attention""", """encoder_attn"""],
["""attention""", """attn"""],
["""/""", """."""],
[""".LayerNorm.gamma""", """_layer_norm.weight"""],
[""".LayerNorm.beta""", """_layer_norm.bias"""],
["""r.layer_""", """r.layers."""],
["""output_proj""", """out_proj"""],
["""ffn.dense_1.""", """fc2."""],
["""ffn.dense.""", """fc1."""],
["""ffn_layer_norm""", """final_layer_norm"""],
["""kernel""", """weight"""],
["""encoder_layer_norm.""", """encoder.layer_norm."""],
["""decoder_layer_norm.""", """decoder.layer_norm."""],
["""embeddings.weights""", """shared.weight"""],
]
def A_ ( __SCREAMING_SNAKE_CASE : Dict ) -> List[Any]:
for pegasus_name, hf_name in PATTERNS:
__SCREAMING_SNAKE_CASE : List[str] = k.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return k
def A_ ( __SCREAMING_SNAKE_CASE : dict , __SCREAMING_SNAKE_CASE : dict ) -> PegasusForConditionalGeneration:
__SCREAMING_SNAKE_CASE : Tuple = DEFAULTS.copy()
cfg_kwargs.update(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : Union[str, Any] = PegasusConfig(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : List[Any] = PegasusForConditionalGeneration(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : Tuple = torch_model.model.state_dict()
__SCREAMING_SNAKE_CASE : Dict = {}
for k, v in tf_weights.items():
__SCREAMING_SNAKE_CASE : List[str] = rename_state_dict_key(__SCREAMING_SNAKE_CASE )
if new_k not in sd:
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if "dense" in k or "proj" in new_k:
__SCREAMING_SNAKE_CASE : Dict = v.T
__SCREAMING_SNAKE_CASE : Any = torch.tensor(__SCREAMING_SNAKE_CASE , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
# make sure embedding.padding_idx is respected
__SCREAMING_SNAKE_CASE : int = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] )
__SCREAMING_SNAKE_CASE : Optional[Any] = mapping['''shared.weight''']
__SCREAMING_SNAKE_CASE : Tuple = mapping['''shared.weight''']
__SCREAMING_SNAKE_CASE : List[Any] = {k: torch.zeros_like(__SCREAMING_SNAKE_CASE ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping}
mapping.update(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = torch_model.model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : List[Any] = [
k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight''']
]
assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], f"""no matches found for the following tf keys {extra}"""
return torch_model
def A_ ( __SCREAMING_SNAKE_CASE : Union[str, Any]="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
__SCREAMING_SNAKE_CASE : Any = tf.train.list_variables(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : Optional[int] = {}
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['''Adafactor''', '''global_step''']
for name, shape in tqdm(__SCREAMING_SNAKE_CASE , desc='''converting tf checkpoint to dict''' ):
__SCREAMING_SNAKE_CASE : Tuple = any(pat in name for pat in ignore_name )
if skip_key:
continue
__SCREAMING_SNAKE_CASE : Any = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : Dict = array
return tf_weights
def A_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ) -> Optional[int]:
# save tokenizer first
__SCREAMING_SNAKE_CASE : List[str] = Path(__SCREAMING_SNAKE_CASE ).parent.name
__SCREAMING_SNAKE_CASE : Optional[int] = task_specific_params[f"""summarization_{dataset}"""]['''max_position_embeddings''']
__SCREAMING_SNAKE_CASE : List[str] = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' , model_max_length=__SCREAMING_SNAKE_CASE )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(__SCREAMING_SNAKE_CASE )
# convert model
__SCREAMING_SNAKE_CASE : Any = get_tf_weights_as_numpy(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : str = task_specific_params[f"""summarization_{dataset}"""]
if dataset == "large":
__SCREAMING_SNAKE_CASE : Dict = task_specific_params
__SCREAMING_SNAKE_CASE : Optional[int] = convert_pegasus(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
torch_model.save_pretrained(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : List[Any] = torch_model.state_dict()
sd.pop('''model.decoder.embed_positions.weight''' )
sd.pop('''model.encoder.embed_positions.weight''' )
torch.save(__SCREAMING_SNAKE_CASE , Path(__SCREAMING_SNAKE_CASE ) / '''pytorch_model.bin''' )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
_A = parser.parse_args()
if args.save_dir is None:
_A = Path(args.tf_ckpt_path).parent.name
_A = os.path.join("""pegasus""", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 158
| 0
|
'''simple docstring'''
def highest_set_bit_position(number: int) -> int:
    """Return the 1-based position of the most significant set bit (0 for 0),
    equivalent to ``int.bit_length``."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
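# Quick sketch: the loop computes the same thing as int.bit_length(), so a
# cross-check over a few values is a cheap sanity test:
for n in (0, 1, 2, 32, 1_000):
    assert highest_set_bit_position(n) == n.bit_length()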
| 419
|
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : List[str] = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        processor = BlipaProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list with a single random PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipaProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipaProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, GPTaTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
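# Round-trip sketch of what the tests above exercise (illustrative only;
# BlipaProcessor / GPTaTokenizer are this file's names for the processor and
# tokenizer classes, and the assert mirrors the save/load assertions above).
if __name__ == "__main__":
    import tempfile

    tmpdir = tempfile.mkdtemp()
    processor = BlipaProcessor(
        BlipImageProcessor(), GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
    )
    processor.save_pretrained(tmpdir)

    # Reload through the generic AutoProcessor entry point, as the helpers above do.
    reloaded = AutoProcessor.from_pretrained(tmpdir)
    assert reloaded.tokenizer.get_vocab() == processor.tokenizer.get_vocab()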
| 419
| 1
|
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Brute force O(n^2): for each item, scan right for the first larger one."""
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_item: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_item = arr[j]
                break
        result.append(next_item)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Same brute force, written with enumerate and slicing."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """O(n) monotonic stack, scanning the array from right to left."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            # Pop everything that cannot be the next greater element.
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
)
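    # Quick sanity check (a minimal illustrative addition): the three
    # implementations agree on a small hand-checked input.
    sample = [2, 7, 3, 5, 1]
    assert next_greatest_element_slow(sample) == [7, -1, 5, -1, -1]
    assert next_greatest_element_fast(sample) == [7, -1, 5, -1, -1]
    assert next_greatest_element(sample) == [7, -1, 5, -1, -1]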
| 60
|
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """Coefficients are given in ascending order of the power of x."""
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        """Add coefficients of matching powers."""
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        """Subtraction via addition of the negated polynomial."""
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        """Multiplication by convolving the two coefficient lists."""
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        """Evaluate the polynomial at the given value."""
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        """Returns the derivative of the polynomial."""
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant_of_integration: int | float = 0) -> Polynomial:
        """Returns the integral of the polynomial with the given constant."""
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant_of_integration
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
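# Brief usage sketch (illustrative; coefficients are in ascending power order,
# so [1, 2, 3] encodes 3x^2 + 2x + 1).
if __name__ == "__main__":
    p = Polynomial(2, [1, 2, 3])  # 3x^2 + 2x + 1
    q = Polynomial(1, [0, 1])     # x

    print(p + q)           # 3x^2 + 3x + 1
    print(p * q)           # 3x^3 + 2x^2 + 1x
    print(p.evaluate(2))   # 1 + 2*2 + 3*4 = 17
    print(p.derivative())  # 6x + 2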
| 687
| 0
|
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_multi_gpu_data_parallel_forward(self):
        pass
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
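# Minimal inference sketch using the public checkpoint exercised above
# (illustrative addition; the image path reuses the test fixture from this file).
if __name__ == "__main__":
    image_processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small")
    model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small")

    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print(model.config.id2label[int(logits.argmax(-1))])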
| 60
|
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }


def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_segmentation_maps(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long)
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
# Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long)
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
# Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long)
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
# Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long)
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
    def test_reduce_labels(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
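# Sketch of the label-reduction convention the last test exercises. This is an
# illustrative reference implementation of the assumed ADE20k convention, not
# code taken from the processor itself: background id 0 becomes the ignore
# index 255 and the remaining class ids shift down by one, which is why
# reduced labels may reach 255.
def _reduce_labels_reference(segmentation_map):
    label = np.array(segmentation_map).astype(np.int64)
    label[label == 0] = 255  # background becomes the ignore index
    label = label - 1        # remaining class ids shift down by one
    label[label == 254] = 255
    return label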
| 60
| 1
|
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    # Use the Aer simulator backend
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
| 587
|
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    """Image processor that resizes images down to a multiple of ``size_divisor``
    and optionally rescales pixel values to [0, 1]."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size_divisor=32,
        resample=PILImageResampling.BILINEAR,
        do_rescale=True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format=None, **kwargs) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        do_rescale=None,
        size_divisor=None,
        resample=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
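# The resize step above is plain floor-to-multiple arithmetic; a standalone
# illustration (the helper name here is ours, not part of the class):
if __name__ == "__main__":
    def round_down_to_multiple(value: int, divisor: int = 32) -> int:
        return value // divisor * divisor

    assert round_down_to_multiple(481) == 480  # a 481 x 640 image becomes 480 x 640
    assert round_down_to_multiple(640) == 640
    assert round_down_to_multiple(31) == 0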
| 587
| 1
|
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-char bit string from big endian to little endian word order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Format a non-negative int as 8 hex chars in little endian byte order."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Turn the message into a bit string padded to a multiple of 512 chars."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the bit string into 512-char blocks of 16 little endian 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT restricted to 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2^32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate the bits of a 32-bit int left by `shift` positions."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the 32-char hexadecimal MD5 digest of `message` as bytes."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
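    # Cross-check against the standard library (a quick illustrative test).
    import hashlib

    assert md5_me(b"") == hashlib.md5(b"").hexdigest().encode("utf-8")
    assert (
        md5_me(b"The quick brown fox jumps over the lazy dog")
        == hashlib.md5(b"The quick brown fox jumps over the lazy dog").hexdigest().encode("utf-8")
    )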
| 704
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case__ ( self : Tuple ):
'''simple docstring'''
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 241
| 0
|
import os
import pytest
from attr import dataclass
__A : int = "us-east-1" # defaults region
@dataclass
class lowerCamelCase:
'''simple docstring'''
__magic_name__ = 42
__magic_name__ = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
__magic_name__ = {
'task_name': 'mnli',
'per_device_train_batch_size': 16,
'per_device_eval_batch_size': 16,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 500,
'save_steps': 5_500,
}
__magic_name__ = {**hyperparameters, 'max_steps': 1_000}
@property
def lowerCAmelCase__ ( self ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def lowerCAmelCase__ ( self ):
return F"{self.framework}-transfromers-test"
@property
def lowerCAmelCase__ ( self ):
return F"./tests/sagemaker/scripts/{self.framework}"
@property
def lowerCAmelCase__ ( self ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
_A = SageMakerTestEnvironment(framework=request.cls.framework )
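# Hypothetical consumer of the fixture above (illustrative only and therefore
# left commented out, since a live test class does not belong in a conftest;
# the fixture name and class shown here are assumptions). The fixture attaches
# `env` to the test class, keyed off its class-level `framework` attribute.
#
# @pytest.mark.usefixtures("sm_env")
# class TestEnvironmentSmoke:
#     framework = "pytorch"
#
#     def test_base_job_name(self):
#         assert self.env.base_job_name == "pytorch-transfromers-test"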
| 27
|
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0"
        )

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item
        # or the length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution()}')
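    # Worked illustration of the chain definition: 169 sits on a 3-cycle of
    # the digit-factorial map, the classic example from Project Euler 74.
    assert digit_factorial_sum(169) == 363601  # 1! + 6! + 9!
    assert digit_factorial_sum(363601) == 1454
    assert digit_factorial_sum(1454) == 169  # back to the start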
| 384
| 0
|
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)
def UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> List[str]:
"""simple docstring"""
snake_case__ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def UpperCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Optional[int] =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case__ : int =type
self.model_tester.create_and_check_model(*_lowercase )
def UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
snake_case__, snake_case__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : List[Any] =3
snake_case__ : Dict =input_dict['''input_ids''']
snake_case__ : int =input_ids.ne(1 ).to(_lowercase )
snake_case__ : Dict =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case__ : str =LlamaForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
snake_case__ : List[Any] =model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__, snake_case__ : Any =self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Dict =3
snake_case__ : Dict ='''single_label_classification'''
snake_case__ : Union[str, Any] =input_dict['''input_ids''']
snake_case__ : Optional[int] =input_ids.ne(1 ).to(_lowercase )
snake_case__ : Optional[int] =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case__ : Tuple =LlamaForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
snake_case__ : Tuple =model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
snake_case__, snake_case__ : int =self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Optional[int] =3
snake_case__ : Optional[int] ='''multi_label_classification'''
snake_case__ : Dict =input_dict['''input_ids''']
snake_case__ : Optional[Any] =input_ids.ne(1 ).to(_lowercase )
snake_case__ : Tuple =ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case__ : Union[str, Any] =LlamaForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
snake_case__ : List[Any] =model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
def UpperCAmelCase ( self ) -> int:
"""simple docstring"""
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
snake_case__, snake_case__ : Any =self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Optional[int] =ids_tensor([1, 10] , config.vocab_size )
snake_case__ : Any =ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case__ : List[Any] =LlamaModel(_lowercase )
original_model.to(_lowercase )
original_model.eval()
snake_case__ : Any =original_model(_lowercase ).last_hidden_state
snake_case__ : str =original_model(_lowercase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
snake_case__ : str ={'''type''': scaling_type, '''factor''': 10.0}
snake_case__ : Dict =LlamaModel(_lowercase )
scaled_model.to(_lowercase )
scaled_model.eval()
snake_case__ : Optional[int] =scaled_model(_lowercase ).last_hidden_state
snake_case__ : Tuple =scaled_model(_lowercase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_lowercase , _lowercase , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_lowercase , _lowercase , atol=1e-5 ) )
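# A minimal sketch, using a hypothetical helper rather than the transformers
# implementation, of why the test above expects linear RoPE scaling to change even
# short-sequence outputs while dynamic scaling does not: linear scaling divides every
# position id by the factor up front, whereas dynamic scaling only adjusts once the
# input exceeds the original maximum sequence length.
import torch

def rope_angles(positions: torch.Tensor, dim: int = 8, base: float = 10_000.0, linear_factor: float = 1.0) -> torch.Tensor:
    # Inverse frequencies, one per pair of channels.
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    # Linear scaling stretches the context window by shrinking the position ids.
    t = positions.float() / linear_factor
    return torch.outer(t, inv_freq)

short = torch.arange(10)
print(torch.allclose(rope_angles(short), rope_angles(short, linear_factor=10.0)))  # False even for short inputs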
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
snake_case__ : Dict =[1, 306, 4658, 278, 6593, 310, 2834, 338]
snake_case__ : Tuple =LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' )
snake_case__ : Any =model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
snake_case__ : str =torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , _lowercase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
snake_case__ : Tuple =torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _lowercase , atol=1e-5 , rtol=1e-5 )
@unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
snake_case__ : Tuple =[1, 306, 4658, 278, 6593, 310, 2834, 338]
snake_case__ : List[str] =LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' )
snake_case__ : Optional[int] =model(torch.tensor(_lowercase ) )
# Expected mean on dim = -1
snake_case__ : Any =torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , _lowercase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
snake_case__ : str =torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _lowercase , atol=1e-5 , rtol=1e-5 )
@unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Optional[Any] =[1, 306, 4658, 278, 6593, 310, 2834, 338]
snake_case__ : Dict =LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' )
snake_case__ : int =model(torch.tensor(_lowercase ) )
# Expected mean on dim = -1
snake_case__ : Optional[Any] =torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , _lowercase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
snake_case__ : Optional[int] =torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , _lowercase , atol=1e-2 , rtol=1e-2 )
@unittest.skip(
'''Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test''' )
@slow
def UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
snake_case__ : int =[1, 306, 4658, 278, 6593, 310, 2834, 338]
snake_case__ : Optional[Any] =LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' )
snake_case__ : Union[str, Any] =model(torch.tensor(_lowercase ) )
snake_case__ : Dict =torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , _lowercase , atol=1e-2 , rtol=1e-2 )
# fmt: off
snake_case__ : List[Any] =torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _lowercase , atol=1e-5 , rtol=1e-5 )
@unittest.skip('''Model is currently gated''' )
@slow
def UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
snake_case__ : Union[str, Any] ='''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
snake_case__ : List[str] ='''Simply put, the theory of relativity states that '''
snake_case__ : Any =LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' )
snake_case__ : Union[str, Any] =tokenizer.encode(_lowercase , return_tensors='''pt''' )
snake_case__ : Any =LlamaForCausalLM.from_pretrained(
'''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=_lowercase )
# greedy generation outputs
snake_case__ : Union[str, Any] =model.generate(_lowercase , max_new_tokens=64 , top_p=_lowercase , temperature=1 , do_sample=_lowercase )
snake_case__ : Optional[int] =tokenizer.decode(generated_ids[0] , skip_special_tokens=_lowercase )
self.assertEqual(_lowercase , _lowercase )
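# A hedged usage sketch of the same greedy-generation contract on an open tiny
# checkpoint (the Llama-2 checkpoints above are gated); the model name and prompt
# here are stand-ins, not the test's expected output.
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
ids = tok.encode("Simply put, the theory of relativity states that ", return_tensors="pt")
out = model.generate(ids, max_new_tokens=16, do_sample=False)  # greedy decoding
print(tok.decode(out[0], skip_special_tokens=True))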
| 703
|
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
snake_case__ : int =torch.nn.Linear(10 , 10 )
snake_case__ : int =torch.optim.SGD(model.parameters() , 0.1 )
snake_case__ : str =Accelerator()
snake_case__ : Any =accelerator.prepare(__SCREAMING_SNAKE_CASE )
try:
pickle.loads(pickle.dumps(__SCREAMING_SNAKE_CASE ) )
except Exception as e:
self.fail(f'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
| 408
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
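# The record above follows the transformers lazy-import pattern. A simplified sketch
# of the idea (not the actual _LazyModule class): attribute access triggers the import
# of whichever submodule defines that attribute, so heavy backends load on demand.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map every exported name to the submodule that defines it.
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr: str):
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)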
| 88
|
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
_lowercase : str =(
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
_lowercase : list[int] =[ord(letter) for letter in string.ascii_lowercase]
_lowercase : set[int] ={ord(char) for char in VALID_CHARS}
_lowercase : list[str] =["the", "be", "to", "of", "and", "in", "that", "have"]
def A__ ( lowercase: list[int], lowercase: tuple[int, ...] ) -> str | None:
A : str =""
A : int
A : int
A : int
for keychar, cipherchar in zip(cycle(lowercase ), lowercase ):
A : Tuple =cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(lowercase )
return decoded
def A__ ( lowercase: list[int] ) -> list[str]:
A : list[str] =[]
for key in product(lowercase, repeat=3 ):
A : Any =try_key(lowercase, lowercase )
if encoded is not None:
possibles.append(lowercase )
return possibles
def A__ ( lowercase: list[str], lowercase: str ) -> list[str]:
return [possible for possible in possibles if common_word in possible.lower()]
def A__ ( lowercase: str = "p059_cipher.txt" ) -> int:
A : list[int]
A : list[str]
A : str
A : str
A : str =Path(lowercase ).parent.joinpath(lowercase ).read_text(encoding='utf-8' )
A : Tuple =[int(lowercase ) for number in data.strip().split(',' )]
A : Dict =filter_valid_chars(lowercase )
for common_word in COMMON_WORDS:
A : List[Any] =filter_common_word(lowercase, lowercase )
if len(lowercase ) == 1:
break
A : str =possibles[0]
return sum(ord(lowercase ) for char in decoded_text )
if __name__ == "__main__":
print(f'''{solution() = }''')
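# The brute force above works because XOR is its own inverse: applying the same
# repeating key twice restores the plaintext. A small round trip with an assumed
# three-letter key:
from itertools import cycle

key = b"abc"  # hypothetical key, same shape as the brute-forced keys above
plaintext = b"the quick brown fox"
cipher = [p ^ k for p, k in zip(plaintext, cycle(key))]
assert bytes(c ^ k for c, k in zip(cipher, cycle(key))) == plaintext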
| 305
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class A__ ( __A , __A , __A , unittest.TestCase ):
"""simple docstring"""
_lowercase = StableUnCLIPImgaImgPipeline
_lowercase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
_lowercase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_lowercase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowercase = frozenset([] )
def _UpperCamelCase( self : Any ):
a__ : List[Any] = 32
a__ : Optional[Any] = embedder_hidden_size
# image encoding components
a__ : List[str] = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
a__ : str = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowerCamelCase__ , projection_dim=lowerCamelCase__ , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
a__ : int = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase__ )
a__ : Union[str, Any] = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
a__ : str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
a__ : Tuple = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
a__ : int = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCamelCase__ , layers_per_block=1 , upcast_attention=lowerCamelCase__ , use_linear_projection=lowerCamelCase__ , )
torch.manual_seed(0 )
a__ : int = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCamelCase__ , steps_offset=1 , )
torch.manual_seed(0 )
a__ : str = AutoencoderKL()
a__ : int = {
# image encoding components
'''feature_extractor''': feature_extractor,
'''image_encoder''': image_encoder.eval(),
# image noising components
'''image_normalizer''': image_normalizer.eval(),
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder.eval(),
'''unet''': unet.eval(),
'''scheduler''': scheduler,
'''vae''': vae.eval(),
}
return components
def _UpperCamelCase( self : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : List[Any]=0 , lowerCamelCase__ : int=True ):
if str(lowerCamelCase__ ).startswith("mps" ):
a__ : List[str] = torch.manual_seed(lowerCamelCase__ )
else:
a__ : Any = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
a__ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
if pil_image:
a__ : str = input_image * 0.5 + 0.5
a__ : Tuple = input_image.clamp(0 , 1 )
a__ : Tuple = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
a__ : Dict = DiffusionPipeline.numpy_to_pil(lowerCamelCase__ )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def _UpperCamelCase( self : Dict ):
a__ : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
a__ : Optional[int] = self.get_dummy_components()
a__ : List[Any] = StableUnCLIPImgaImgPipeline(**lowerCamelCase__ )
a__ : str = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
a__ : Optional[Any] = self.get_dummy_inputs(lowerCamelCase__ )
inputs.update({"image_embeds": None} )
a__ : int = sd_pipe(**lowerCamelCase__ ).images
a__ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a__ : str = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _UpperCamelCase( self : Dict ):
a__ : Union[str, Any] = torch_device in ['''cpu''', '''mps''']
self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase__ )
def _UpperCamelCase( self : List[Any] ):
a__ : int = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase__ )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _UpperCamelCase( self : Optional[Any] ):
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCamelCase__ )
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase( self : Optional[int] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase( self : List[Any] ):
a__ : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
a__ : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
a__ : Union[str, Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a__ : Tuple = torch.Generator(device="cpu" ).manual_seed(0 )
a__ : Dict = pipe(lowerCamelCase__ , "anime turtle" , generator=lowerCamelCase__ , output_type="np" )
a__ : int = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCamelCase( self : Optional[Any] ):
a__ : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
a__ : Optional[int] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
a__ : Tuple = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a__ : Any = torch.Generator(device="cpu" ).manual_seed(0 )
a__ : Optional[Any] = pipe(lowerCamelCase__ , "anime turtle" , generator=lowerCamelCase__ , output_type="np" )
a__ : str = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCamelCase( self : str ):
a__ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
a__ : Any = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
a__ : List[Any] = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a__ : List[str] = pipe(
lowerCamelCase__ , "anime turtle" , num_inference_steps=2 , output_type="np" , )
a__ : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
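# The two memory-saving calls above apply to diffusers pipelines generally; a hedged
# sketch (the checkpoint name is an assumption, not taken from the tests above):
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
pipe.enable_attention_slicing()        # compute attention in slices to cut peak memory
pipe.enable_sequential_cpu_offload()   # keep weights on CPU, stream submodules to GPU on demand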
| 708
|
def UpperCamelCase_ ( __a ) -> bool:
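    """Return True if ``num`` reads the same backwards in base 10.

    Doctests added here as illustrative examples, so the ``doctest.testmod()``
    call below has cases to run:

    >>> UpperCamelCase_(121)
    True
    >>> UpperCamelCase_(-121)
    False
    >>> UpperCamelCase_(10)
    False
    """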
if num < 0:
return False
a__ : int = num
a__ : int = 0
while num > 0:
a__ : str = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 151
| 0
|
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
UpperCAmelCase : int = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
UpperCAmelCase : Dict = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
UpperCAmelCase : Tuple = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n \'num_edits\' (int): The cumulative number of edits\n \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n Example 2:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n Example 3:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n Example 4:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n Example 5:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Dict ) -> Any:
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""http://www.cs.umd.edu/~snover/tercom/""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#ter"""] , reference_urls=[
"""https://github.com/jhclark/tercom""",
] , )
def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = len(references[0] )
if any(len(__SCREAMING_SNAKE_CASE ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
__SCREAMING_SNAKE_CASE = [[refs[i] for refs in references] for i in range(__SCREAMING_SNAKE_CASE )]
__SCREAMING_SNAKE_CASE = TER(
normalized=__SCREAMING_SNAKE_CASE , no_punct=__SCREAMING_SNAKE_CASE , asian_support=__SCREAMING_SNAKE_CASE , case_sensitive=__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = sb_ter.corpus_score(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 627
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = ["image_processor", "tokenizer"]
lowerCAmelCase__ = "LayoutLMv2ImageProcessor"
lowerCAmelCase__ = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : List[Any]=None , **__SCREAMING_SNAKE_CASE : str ) -> List[str]:
"""simple docstring"""
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = kwargs.pop("""feature_extractor""" )
__SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __call__( self : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __SCREAMING_SNAKE_CASE : Union[List[List[int]], List[List[List[int]]]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[List[int], List[List[int]]]] = None , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Union[bool, str, PaddingStrategy] = False , __SCREAMING_SNAKE_CASE : Union[bool, str, TruncationStrategy] = None , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : int = 0 , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , **__SCREAMING_SNAKE_CASE : Any , ) -> BatchEncoding:
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"""You cannot provide bounding boxes """
"""if you initialized the image processor with apply_ocr set to True.""" )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"""You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("""You cannot return overflowing tokens without returning the offsets mapping.""" )
# first, apply the image processor
__SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
__SCREAMING_SNAKE_CASE = [text] # add batch dimension (as the image processor always adds a batch dimension)
__SCREAMING_SNAKE_CASE = features["""words"""]
__SCREAMING_SNAKE_CASE = self.tokenizer(
text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , stride=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_overflowing_tokens=__SCREAMING_SNAKE_CASE , return_special_tokens_mask=__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , return_length=__SCREAMING_SNAKE_CASE , verbose=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
# add pixel values
__SCREAMING_SNAKE_CASE = features.pop("""pixel_values""" )
if return_overflowing_tokens is True:
__SCREAMING_SNAKE_CASE = self.get_overflowing_images(__SCREAMING_SNAKE_CASE , encoded_inputs["""overflow_to_sample_mapping"""] )
__SCREAMING_SNAKE_CASE = images
return encoded_inputs
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
raise ValueError(
"""Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
f' {len(__SCREAMING_SNAKE_CASE )} and {len(__SCREAMING_SNAKE_CASE )}' )
return images_with_overflow
def UpperCAmelCase__ ( self : Optional[Any] , *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : List[str] ) -> List[str]:
"""simple docstring"""
return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Union[str, Any] , *__SCREAMING_SNAKE_CASE : Dict , **__SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@property
def UpperCAmelCase__ ( self : int ) -> Optional[int]:
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __SCREAMING_SNAKE_CASE , )
return self.image_processor_class
@property
def UpperCAmelCase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __SCREAMING_SNAKE_CASE , )
return self.image_processor
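# A hedged usage sketch of the processor above; the checkpoint name is an assumption
# and apply_ocr=True requires Tesseract to be installed locally.
from PIL import Image
from transformers import LayoutXLMProcessor

processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
image = Image.open("document.png").convert("RGB")  # hypothetical scanned page
encoding = processor(image, return_tensors="pt")
print(sorted(encoding.keys()))  # expect input_ids, bbox, attention_mask, image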
| 627
| 1
|
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowerCamelCase_ : int = get_logger(__name__)
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
__a : Tuple = "dummy_data"
__a : Union[str, Any] = "datasets"
__a : Any = False
def __init__( self : List[str] , lowercase : str , lowercase : str , lowercase : Union[Version, str] , lowercase : Optional[str] = None , lowercase : bool = False , lowercase : bool = True , lowercase : Optional[List[Callable]] = None , ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = 0
UpperCamelCase__ = dataset_name
UpperCamelCase__ = cache_dir
UpperCamelCase__ = use_local_dummy_data
UpperCamelCase__ = config
# download_callbacks take a single url as input
UpperCamelCase__ = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
UpperCamelCase__ = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
UpperCamelCase__ = str(lowercase )
# to be downloaded
UpperCamelCase__ = None
UpperCamelCase__ = None
@property
def A ( self : List[Any] ) -> List[str]:
'''simple docstring'''
if self._dummy_file is None:
UpperCamelCase__ = self.download_dummy_data()
return self._dummy_file
@property
def A ( self : Optional[int] ) -> int:
'''simple docstring'''
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("""dummy""" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("""dummy""" , self.version_name )
@property
def A ( self : List[str] ) -> Any:
'''simple docstring'''
return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
def A ( self : str ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
UpperCamelCase__ = cached_path(
lowercase , cache_dir=self.cache_dir , extract_compressed_file=lowercase , force_extract=lowercase )
return os.path.join(lowercase , self.dummy_file_name )
@property
def A ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def A ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
if self._bucket_url is None:
UpperCamelCase__ = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
return self._bucket_url
@property
def A ( self : int ) -> Tuple:
'''simple docstring'''
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
def A ( self : Optional[Any] , lowercase : List[str] , *lowercase : int ) -> Any:
'''simple docstring'''
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
UpperCamelCase__ = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
UpperCamelCase__ = self.dummy_file_name
# special case when data_url is a dict
if isinstance(lowercase , lowercase ):
return self.create_dummy_data_dict(lowercase , lowercase )
elif isinstance(lowercase , (list, tuple) ):
return self.create_dummy_data_list(lowercase , lowercase )
else:
return self.create_dummy_data_single(lowercase , lowercase )
def A ( self : Union[str, Any] , lowercase : Optional[Any] , *lowercase : Any ) -> Optional[Any]:
'''simple docstring'''
return self.download_and_extract(lowercase )
def A ( self : Optional[Any] , lowercase : int , lowercase : Dict ) -> Tuple:
'''simple docstring'''
return self.download_and_extract(lowercase )
def A ( self : Optional[int] , lowercase : str , *lowercase : Union[str, Any] , **lowercase : Optional[Any] ) -> Tuple:
'''simple docstring'''
return path
def A ( self : Tuple ) -> List[str]:
'''simple docstring'''
return {}
def A ( self : Union[str, Any] , lowercase : Optional[int] , lowercase : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(lowercase , lowercase ):
for single_url in single_urls:
download_callback(lowercase )
else:
UpperCamelCase__ = single_urls
download_callback(lowercase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(lowercase , lowercase ):
UpperCamelCase__ = [os.path.join(lowercase , urllib.parse.quote_plus(Path(lowercase ).name ) ) for x in single_urls]
else:
UpperCamelCase__ = single_urls
UpperCamelCase__ = os.path.join(lowercase , urllib.parse.quote_plus(Path(lowercase ).name ) )
UpperCamelCase__ = value
# make sure that values are unique
if all(isinstance(lowercase , lowercase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
UpperCamelCase__ = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def A ( self : List[Any] , lowercase : List[str] , lowercase : List[str] ) -> str:
'''simple docstring'''
UpperCamelCase__ = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
UpperCamelCase__ = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , lowercase ) ) for url in data_url )
UpperCamelCase__ = all(
url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
UpperCamelCase__ = [data_url[0]] * len(lowercase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(lowercase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
UpperCamelCase__ = os.path.join(lowercase , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
dummy_data_list.append(lowercase )
return dummy_data_list
def A ( self : Optional[int] , lowercase : Optional[Any] , lowercase : Optional[int] ) -> List[Any]:
'''simple docstring'''
for download_callback in self.download_callbacks:
download_callback(lowercase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
UpperCamelCase__ = os.path.join(lowercase , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
if os.path.exists(lowercase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def A ( self : str ) -> Union[str, Any]:
'''simple docstring'''
pass
def A ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
pass
def A ( self : int , lowercase : Tuple ) -> Optional[int]:
'''simple docstring'''
def _iter_archive_members(lowercase : Optional[Any] ):
# this preserves the order of the members inside the ZIP archive
UpperCamelCase__ = Path(self.dummy_file ).parent
UpperCamelCase__ = path.relative_to(lowercase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
UpperCamelCase__ = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(lowercase )
UpperCamelCase__ = Path(lowercase )
UpperCamelCase__ = _iter_archive_members(lowercase ) if self.use_local_dummy_data else path.rglob("""*""" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
yield file_path.relative_to(lowercase ).as_posix(), file_path.open("""rb""" )
def A ( self : Tuple , lowercase : Tuple ) -> List[str]:
'''simple docstring'''
if not isinstance(lowercase , lowercase ):
UpperCamelCase__ = [paths]
for path in paths:
if os.path.isfile(lowercase ):
if os.path.basename(lowercase ).startswith((""".""", """__""") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(lowercase ):
if os.path.basename(lowercase ).startswith((""".""", """__""") ):
continue
dirnames.sort()
for filename in sorted(lowercase ):
if filename.startswith((""".""", """__""") ):
continue
yield os.path.join(lowercase , lowercase )
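# The manager above maps each remote URL to a member of the dummy zip by quoting the
# final path segment; a small sketch with a made-up URL and dummy-data root:
import os
import urllib.parse
from pathlib import Path

url = "https://example.com/data/train.csv?split=1"  # hypothetical URL
local = os.path.join("dummy/1.0.0/dummy_data", urllib.parse.quote_plus(Path(url).name))
print(local)  # dummy/1.0.0/dummy_data/train.csv%3Fsplit%3D1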
| 265
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE )
class _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__a : str = field(default="audio-classification" ,metadata={"include_in_asdict_even_if_is_default": True} )
__a : ClassVar[Features] = Features({"audio": Audio()} )
__a : ClassVar[Features] = Features({"labels": ClassLabel} )
__a : str = "audio"
__a : str = "labels"
def A ( self : List[Any] , lowercase : List[Any] ) -> Any:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(f"Column {self.label_column} is not present in features." )
if not isinstance(features[self.label_column] , lowercase ):
raise ValueError(f"Column {self.label_column} is not a ClassLabel." )
UpperCamelCase__ = copy.deepcopy(self )
UpperCamelCase__ = self.label_schema.copy()
UpperCamelCase__ = features[self.label_column]
UpperCamelCase__ = label_schema
return task_template
@property
def A ( self : int ) -> Dict[str, str]:
'''simple docstring'''
return {
self.audio_column: "audio",
self.label_column: "labels",
}
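# A hedged sketch of the feature schema the task template above aligns against; the
# sampling rate and label names here are made up:
from datasets import Audio, ClassLabel, Features

features = Features({
    "audio": Audio(sampling_rate=16_000),
    "labels": ClassLabel(names=["no", "yes"]),
})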
| 265
| 1
|
'''simple docstring'''
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class snake_case__ ( UpperCamelCase , UpperCamelCase):
@register_to_config
def __init__( self : Optional[Any] , *,
_A : int = 4 , _A : int = 7_68 , _A : int , _A : Optional[Any] , ) -> List[Any]:
super().__init__()
UpperCAmelCase_ : Dict = nn.Parameter(torch.zeros(_A ) )
# parameters for additional clip time embeddings
UpperCAmelCase_ : Dict = nn.Linear(_A , _A )
UpperCAmelCase_ : str = nn.Linear(_A , _A )
# parameters for encoder hidden states
UpperCAmelCase_ : str = clip_extra_context_tokens
UpperCAmelCase_ : List[str] = nn.Linear(
_A , self.clip_extra_context_tokens * cross_attention_dim )
UpperCAmelCase_ : Tuple = nn.Linear(_A , _A )
UpperCAmelCase_ : Dict = nn.LayerNorm(_A )
def A ( self : Union[str, Any] , *, _A : Union[str, Any] , _A : Optional[int] , _A : Dict , _A : Any ) -> Optional[Any]:
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
UpperCAmelCase_ : Tuple = image_embeddings.shape[0]
UpperCAmelCase_ : str = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
UpperCAmelCase_ : List[str] = classifier_free_guidance_embeddings.expand(
_A , -1 )
UpperCAmelCase_ : Any = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
UpperCAmelCase_ : Optional[int] = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
UpperCAmelCase_ : str = self.embedding_proj(_A )
UpperCAmelCase_ : Optional[Any] = self.clip_image_embeddings_project_to_time_embeddings(_A )
UpperCAmelCase_ : int = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
UpperCAmelCase_ : Union[str, Any] = self.clip_extra_context_tokens_proj(_A )
UpperCAmelCase_ : Union[str, Any] = clip_extra_context_tokens.reshape(_A , -1 , self.clip_extra_context_tokens )
UpperCAmelCase_ : Union[str, Any] = clip_extra_context_tokens.permute(0 , 2 , 1 )
UpperCAmelCase_ : int = self.encoder_hidden_states_proj(_A )
UpperCAmelCase_ : List[str] = self.text_encoder_hidden_states_norm(_A )
UpperCAmelCase_ : Dict = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
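# A shape-only sketch of the classifier-free guidance step above: a learned "null"
# embedding is prepended so unconditional and conditional rows share one forward
# pass. The sizes here are assumptions.
import torch

batch, dim = 2, 768
image_embeddings = torch.randn(batch, dim)
null_embedding = torch.zeros(dim)  # stands in for the learned parameter above
guided = torch.cat([null_embedding.unsqueeze(0).expand(batch, -1), image_embeddings], dim=0)
print(guided.shape)  # torch.Size([4, 768]): unconditional rows first, then conditional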
| 541
|
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class snake_case__ :
def __init__( self : Union[str, Any] , _A : Optional[Any] , _A : Dict=2 , _A : Optional[Any]=32 , _A : List[str]=16 , _A : str=3 , _A : List[Any]=True , _A : Optional[int]=True , _A : Optional[int]=32 , _A : Optional[Any]=4 , _A : Optional[int]=[0, 1, 2, 3] , _A : List[Any]=4 , _A : Optional[int]=37 , _A : Optional[Any]="gelu" , _A : Optional[Any]=0.1 , _A : Union[str, Any]=0.1 , _A : Dict=0.02 , _A : Optional[Any]=3 , _A : Union[str, Any]=[1, 3_84, 24, 24] , _A : int=True , _A : int=None , ) -> Tuple:
UpperCAmelCase_ : Tuple = parent
UpperCAmelCase_ : List[str] = batch_size
UpperCAmelCase_ : Union[str, Any] = image_size
UpperCAmelCase_ : List[Any] = patch_size
UpperCAmelCase_ : Dict = num_channels
UpperCAmelCase_ : str = is_training
UpperCAmelCase_ : List[str] = use_labels
UpperCAmelCase_ : Optional[Any] = hidden_size
UpperCAmelCase_ : str = num_hidden_layers
UpperCAmelCase_ : int = backbone_out_indices
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : Dict = hidden_dropout_prob
UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : int = num_labels
UpperCAmelCase_ : Union[str, Any] = backbone_featmap_shape
UpperCAmelCase_ : List[str] = scope
UpperCAmelCase_ : List[str] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase_ : List[Any] = (image_size // patch_size) ** 2
UpperCAmelCase_ : List[str] = num_patches + 1
def A ( self : Dict ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : List[str] = None
if self.use_labels:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase_ : Dict = self.get_config()
return config, pixel_values, labels
def A ( self : int ) -> Optional[Any]:
UpperCAmelCase_ : Optional[Any] = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 1_92, 3_84, 7_68],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=_A , backbone_featmap_shape=self.backbone_featmap_shape , )
def A ( self : Union[str, Any] , _A : List[Any] , _A : Optional[int] , _A : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = DPTModel(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase_ : List[str] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : Tuple , _A : str , _A : Tuple , _A : Union[str, Any] ) -> List[str]:
UpperCAmelCase_ : List[Any] = self.num_labels
UpperCAmelCase_ : Any = DPTForDepthEstimation(_A )
model.to(_A )
model.eval()
UpperCAmelCase_ : int = model(_A )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def A ( self : List[str] , _A : str , _A : Union[str, Any] , _A : int ) -> Dict:
UpperCAmelCase_ : int = self.num_labels
UpperCAmelCase_ : Optional[Any] = DPTForSemanticSegmentation(_A )
model.to(_A )
model.eval()
UpperCAmelCase_ : Tuple = model(_A , labels=_A )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def A ( self : Dict ) -> Any:
UpperCAmelCase_ : Optional[int] = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = config_and_inputs
UpperCAmelCase_ : Tuple = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
a_ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
a_ = (
{
"depth-estimation": DPTForDepthEstimation,
"feature-extraction": DPTModel,
"image-segmentation": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a_ = False
a_ = False
a_ = False
def A ( self : Tuple ) -> Tuple:
UpperCAmelCase_ : Any = DPTModelTester(self )
UpperCAmelCase_ : Dict = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def A ( self : Optional[int] ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def A ( self : Union[str, Any] ) -> str:
pass
def A ( self : str ) -> Dict:
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Union[str, Any] = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase_ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , nn.Linear ) )
def A ( self : List[Any] ) -> Tuple:
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Dict = model_class(_A )
UpperCAmelCase_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : int = [*signature.parameters.keys()]
UpperCAmelCase_ : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def A ( self : List[str] ) -> str:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def A ( self : str ) -> List[str]:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*_A )
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
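
# A minimal follow-on sketch (an assumption for illustration, not part of the
# test above): DPT returns a 384x384 depth map, so a common post-processing step
# is to resize it back to the input image resolution. The bicubic mode is an
# illustrative choice.
import torch.nn.functional as F


def resize_depth_to_image(predicted_depth, image):
    # PIL reports size as (width, height); interpolate expects (height, width).
    return F.interpolate(
        predicted_depth.unsqueeze(1),  # add a channel dim: (batch, 1, 384, 384)
        size=image.size[::-1],
        mode="bicubic",
        align_corners=False,
    ).squeeze()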
"""simple docstring"""
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)


class _PatchedModuleObj:
    """Set all the module's components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other attributes intact."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
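
# A minimal usage sketch (an assumption, not from this module): patch "os.path.join"
# as seen from a throwaway namespace object that stands in for a module which did
# `import os`. Kept as a comment so importing this module stays side-effect free.
#
#     import os
#     from types import SimpleNamespace
#
#     mod = SimpleNamespace(os=os)
#
#     def mock_join(*parts):
#         return "::".join(parts)
#
#     with patch_submodule(mod, "os.path.join", mock_join):
#         assert mod.os.path.join("a", "b") == "a::b"
#     assert mod.os.path.join("a", "b") == os.path.join("a", "b")  # restored on exit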
"""simple docstring"""
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float) -> tuple[str, float]:
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
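    # Short usage sketch (not in the original file): pass the unknown quantity as zero.
    print(shear_stress(stress=25, tangential_force=100, area=0))  # ('area', 4.0)
    print(shear_stress(stress=0, tangential_force=1600, area=200))  # ('stress', 8.0)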
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    # Return x unchanged if it is already iterable, otherwise duplicate it into a 2-tuple.
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)

@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, vision_config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def test_vision_text_dual_encoder_model(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs)

    def test_model_from_pretrained_configs(self):
        inputs = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs)

    def test_save_load(self):
        inputs = self.prepare_config_and_inputs()
        self.check_save_load(**inputs)

    def test_vision_text_output_attention(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs)

    @slow
    def test_pretrained_model(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)

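
# Worked example of the sequence-length arithmetic used above (illustrative
# numbers, not taken from the testers): a 224x224 image with 16x16 patches gives
# (224 // 16) * (224 // 16) = 196 patches, so seq_len is 196 + 1 = 197 for ViT
# ([CLS] token only) and 196 + 2 = 198 for DeiT ([CLS] + distillation tokens).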
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }

@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }

@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }

@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
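
# A small follow-on sketch (not part of the test above): similarity logits from a
# dual encoder can be turned into per-image caption probabilities with a softmax
# over the caption axis.
import numpy as np


def caption_probabilities(logits_per_image: np.ndarray) -> np.ndarray:
    # numerically stable softmax over the caption axis
    shifted = logits_per_image - logits_per_image.max(axis=-1, keepdims=True)
    exp = np.exp(shifted)
    return exp / exp.sum(axis=-1, keepdims=True)


# e.g. the expected_logits above give roughly [0.71, 0.29] for (gatto, cane)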
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
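
# A minimal usage sketch (an assumption, not part of the module): align the
# template with a dataset's features so "labels" picks up that dataset's own
# ClassLabel. Kept as a comment because this module relies on relative imports.
#
#     features = Features({"audio": Audio(), "labels": ClassLabel(names=["dog", "cat"])})
#     task = AudioClassification(audio_column="audio", label_column="labels")
#     task = task.align_with_features(features)
#     print(task.label_schema["labels"].names)  # ['dog', 'cat']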
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width

@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """
    Finds the similarity between two sets as the ratio of their intersection
    to their union. With alternative_union=True, the denominator is |A| + |B|.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            # build the union as set_a plus the elements of set_b not already in set_a
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
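    # Worked example (not in the original file): the sets share {'c', 'd', 'e'} and
    # their union has 8 elements, so the call above prints 3 / 8 = 0.375. With
    # alternative_union=True the denominator is 5 + 6 = 11, giving 3 / 11 instead.
    print(jaccard_similarity(set_a, set_b, alternative_union=True))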
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"


def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
'''simple docstring'''
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]) -> None:
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
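    # Short usage sketch (not in the original file): each keyword maps to the
    # starting indices of its occurrences.
    auto = Automaton(["what", "hat", "ver", "er"])
    print(auto.search_in("whatever, err ... , wherever"))
    # -> {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}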
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
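
# A minimal usage sketch (not part of the module): RoPE scaling is configured as
# a two-field dict; anything else fails _rope_scaling_validation. Kept as a
# comment so importing this configuration module stays side-effect free.
#
#     config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # ok
#     GPTNeoXConfig(rope_scaling={"type": "cubic", "factor": 2.0})  # raises ValueError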
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break

        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue

        print(f"\nSearching Open Library for ISBN: {isbn}...\n")

        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f"Sorry, there are no results for ISBN: {isbn}.")
'''simple docstring'''
import socket
def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
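
# A minimal matching server sketch (an assumption, not part of the original file):
# it listens on the same port, accepts one client, and streams a file back. The
# filename default is hypothetical.


def serve_file(filename: str = "File_to_send", port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _addr = server.accept()
    conn.recv(1024)  # consume the client's greeting
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()  # closing signals EOF, which ends the client's recv loop
    server.close()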
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', F'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', F'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qpos_proj.weight', F'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kpos_proj.weight', F'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.weight', F'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', F'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', F'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kpos_proj.weight', F'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.weight', F'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', F'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', F'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', F'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_qpos_proj.bias', F'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_kpos_proj.bias', F'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.bias', F'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', F'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', F'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_kpos_proj.bias', F'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.bias', F'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', F'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : Tuple = state_dict.pop(lowercase_ )
__UpperCAmelCase : Tuple = val
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
__UpperCAmelCase : Dict = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
__UpperCAmelCase : int = value
else:
__UpperCAmelCase : List[str] = value
return new_state_dict
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=False ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : int = ''''''
if is_panoptic:
__UpperCAmelCase : List[Any] = '''conditional_detr.'''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
__UpperCAmelCase : Dict = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
__UpperCAmelCase : Dict = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
__UpperCAmelCase : Any = in_proj_weight[:256, :]
__UpperCAmelCase : Dict = in_proj_bias[:256]
__UpperCAmelCase : str = in_proj_weight[256:512, :]
__UpperCAmelCase : Any = in_proj_bias[256:512]
__UpperCAmelCase : List[str] = in_proj_weight[-256:, :]
__UpperCAmelCase : Tuple = in_proj_bias[-256:]
def __SCREAMING_SNAKE_CASE ( ) -> int:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__UpperCAmelCase : Union[str, Any] = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : Dict = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
__UpperCAmelCase : Dict = '''resnet101'''
if "dc5" in model_name:
__UpperCAmelCase : Optional[Any] = True
__UpperCAmelCase : Optional[Any] = '''panoptic''' in model_name
if is_panoptic:
__UpperCAmelCase : Optional[int] = 250
else:
__UpperCAmelCase : Dict = 91
__UpperCAmelCase : Optional[Any] = '''huggingface/label-files'''
__UpperCAmelCase : int = '''coco-detection-id2label.json'''
__UpperCAmelCase : Tuple = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type='''dataset''' ) , '''r''' ) )
__UpperCAmelCase : Optional[Any] = {int(lowercase_ ): v for k, v in idalabel.items()}
__UpperCAmelCase : Tuple = idalabel
__UpperCAmelCase : Dict = {v: k for k, v in idalabel.items()}
# load image processor
__UpperCAmelCase : Any = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
__UpperCAmelCase : str = ConditionalDetrImageProcessor(format=lowercase_ )
# prepare image
__UpperCAmelCase : Optional[int] = prepare_img()
__UpperCAmelCase : Optional[int] = image_processor(images=lowercase_ , return_tensors='''pt''' )
__UpperCAmelCase : List[Any] = encoding['''pixel_values''']
logger.info(f"Converting model {model_name}..." )
# load original model from torch hub
__UpperCAmelCase : Optional[Any] = torch.hub.load('''DeppMeng/ConditionalDETR''' , lowercase_ , pretrained=lowercase_ ).eval()
__UpperCAmelCase : Any = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
__UpperCAmelCase : str = '''conditional_detr.''' + src
rename_key(lowercase_ , lowercase_ , lowercase_ )
__UpperCAmelCase : Any = rename_backbone_keys(lowercase_ )
# query, key and value matrices need special treatment
read_in_q_k_v(lowercase_ , is_panoptic=lowercase_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__UpperCAmelCase : Optional[int] = '''conditional_detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''conditional_detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
__UpperCAmelCase : Union[str, Any] = state_dict.pop(lowercase_ )
__UpperCAmelCase : str = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
__UpperCAmelCase : Tuple = state_dict.pop(lowercase_ )
__UpperCAmelCase : int = val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
__UpperCAmelCase : Dict = state_dict.pop(lowercase_ )
__UpperCAmelCase : str = val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
__UpperCAmelCase : List[Any] = state_dict.pop(lowercase_ )
__UpperCAmelCase : int = val
# finally, create HuggingFace model and load state dict
__UpperCAmelCase : str = ConditionalDetrForSegmentation(lowercase_ ) if is_panoptic else ConditionalDetrForObjectDetection(lowercase_ )
model.load_state_dict(lowercase_ )
model.eval()
model.push_to_hub(repo_id=lowercase_ , organization='''DepuMeng''' , commit_message='''Add model''' )
# verify our conversion
__UpperCAmelCase : List[Any] = conditional_detr(lowercase_ )
__UpperCAmelCase : int = model(lowercase_ )
assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1e-4 )
# Save model and image processor
logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
model.save_pretrained(lowercase_ )
image_processor.save_pretrained(lowercase_ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
lowerCAmelCase = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
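# Usage sketch (the script filename below is hypothetical; the flags match the
# argparse definition above):
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50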
| 675
|
def __SCREAMING_SNAKE_CASE ( ) -> list[list[int]]:
'''simple docstring'''
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
lowerCAmelCase = generate_large_matrix()
lowerCAmelCase = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> None:
'''simple docstring'''
assert all(row == sorted(lowercase_ , reverse=lowercase_ ) for row in grid )
assert all(list(lowercase_ ) == sorted(lowercase_ , reverse=lowercase_ ) for col in zip(*lowercase_ ) )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : List[Any] = len(lowercase_ ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
__UpperCAmelCase : List[Any] = (left + right) // 2
__UpperCAmelCase : Dict = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
__UpperCAmelCase : Dict = mid + 1
else:
__UpperCAmelCase : Optional[Any] = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : int = 0
__UpperCAmelCase : Dict = len(grid[0] )
for i in range(len(lowercase_ ) ):
__UpperCAmelCase : Any = find_negative_index(grid[i][:bound] )
total += bound
return (len(lowercase_ ) * len(grid[0] )) - total
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
return len([number for row in grid for number in row if number < 0] )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[Any] = 0
for row in grid:
for i, number in enumerate(lowercase_ ):
if number < 0:
total += len(lowercase_ ) - i
break
return total
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
from timeit import timeit
print('''Running benchmarks''' )
__UpperCAmelCase : Tuple = (
'''from __main__ import count_negatives_binary_search, '''
'''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
__UpperCAmelCase : Union[str, Any] = timeit(f"{func}(grid=grid)" , setup=lowercase_ , number=500 )
print(f"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
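# Worked example (a sketch; assumes the counting functions keep the upstream
# names referenced in the benchmark setup string above):
#   grid = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
#   count_negatives_binary_search(grid)           # -> 8
#   count_negatives_brute_force(grid)             # -> 8
#   count_negatives_brute_force_with_break(grid)  # -> 8
# All three agree because the grid is non-increasing along rows and columns,
# which is exactly what the validation helper above asserts.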
| 675
| 1
|
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def lowerCAmelCase_ ( snake_case_ : dict ) -> tuple:
'''simple docstring'''
return (data["data"], data["target"])
def lowerCAmelCase_ ( snake_case_ : np.ndarray , snake_case_ : np.ndarray , snake_case_ : np.ndarray ) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase_ = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(snake_case_ , snake_case_ )
# Predict target for test data
UpperCAmelCase_ = xgb.predict(snake_case_ )
UpperCAmelCase_ = predictions.reshape(len(snake_case_ ) , 1 )
return predictions
def lowerCAmelCase_ ( ) -> None:
'''simple docstring'''
UpperCAmelCase_ = fetch_california_housing()
UpperCAmelCase_ , UpperCAmelCase_ = data_handling(snake_case_ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = train_test_split(
snake_case_ , snake_case_ , test_size=0.25 , random_state=1 )
UpperCAmelCase_ = xgboost(snake_case_ , snake_case_ , snake_case_ )
# Error printing
print(f"""Mean Absolute Error : {mean_absolute_error(snake_case_ , snake_case_ )}""" )
print(f"""Mean Square Error : {mean_squared_error(snake_case_ , snake_case_ )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
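# Minimal extension sketch (not part of the original script): RMSE is often
# reported next to MAE/MSE and is just the square root of the MSE, e.g.
#   rmse = np.sqrt(mean_squared_error(y_test, predictions))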
| 78
|
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
SCREAMING_SNAKE_CASE : Union[str, Any] = NewType('''DataClass''', Any)
SCREAMING_SNAKE_CASE : Tuple = NewType('''DataClassType''', Any)
def __UpperCAmelCase ( snake_case_ : Tuple ) -> List[Any]:
"""simple docstring"""
if isinstance(snake_case_ , snake_case_ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def __UpperCAmelCase ( snake_case_ : list ) -> Callable[[str], Any]:
"""simple docstring"""
_lowerCAmelCase = {str(snake_case_ ): choice for choice in choices}
return lambda snake_case_ : str_to_choice.get(snake_case_ , snake_case_ )
def __UpperCAmelCase ( *,
snake_case_ : Union[str, List[str]] = None , snake_case_ : str = None , snake_case_ : Any = dataclasses.MISSING , snake_case_ : Callable[[], Any] = dataclasses.MISSING , snake_case_ : dict = None , **snake_case_ : Union[str, Any] , ) -> dataclasses.Field:
"""simple docstring"""
if metadata is None:
        # Important: don't use a dict as the default param in the function signature, because dicts are mutable and shared across function calls
_lowerCAmelCase = {}
if aliases is not None:
_lowerCAmelCase = aliases
if help is not None:
_lowerCAmelCase = help
return dataclasses.field(metadata=snake_case_ , default=snake_case_ , default_factory=snake_case_ , **snake_case_ )
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = 42
def __init__(self , lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
if "formatter_class" not in kwargs:
_lowerCAmelCase = ArgumentDefaultsHelpFormatter
super().__init__(**lowerCamelCase )
if dataclasses.is_dataclass(lowerCamelCase ):
_lowerCAmelCase = [dataclass_types]
_lowerCAmelCase = list(lowerCamelCase )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(lowerCamelCase )
@staticmethod
def A__ (lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = f"""--{field.name}"""
_lowerCAmelCase = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , lowerCamelCase ):
raise RuntimeError(
"""Unresolved type detected, which should have been done with the help of """
"""`typing.get_type_hints` method by default""" )
_lowerCAmelCase = kwargs.pop("""aliases""" , [] )
if isinstance(lowerCamelCase , lowerCamelCase ):
_lowerCAmelCase = [aliases]
_lowerCAmelCase = getattr(field.type , """__origin__""" , field.type )
if origin_type is Union or (hasattr(lowerCamelCase , """UnionType""" ) and isinstance(lowerCamelCase , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(lowerCamelCase ) not in field.type.__args__
):
raise ValueError(
"""Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"""
""" the argument parser only supports one type per argument."""
f""" Problem encountered in field '{field.name}'.""" )
if type(lowerCamelCase ) not in field.type.__args__:
# filter `str` in Union
_lowerCAmelCase = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
_lowerCAmelCase = getattr(field.type , """__origin__""" , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
_lowerCAmelCase = (
field.type.__args__[0] if isinstance(lowerCamelCase , field.type.__args__[1] ) else field.type.__args__[1]
)
_lowerCAmelCase = getattr(field.type , """__origin__""" , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
_lowerCAmelCase = {}
if origin_type is Literal or (isinstance(field.type , lowerCamelCase ) and issubclass(field.type , lowerCamelCase )):
if origin_type is Literal:
_lowerCAmelCase = field.type.__args__
else:
_lowerCAmelCase = [x.value for x in field.type]
_lowerCAmelCase = make_choice_type_function(kwargs["""choices"""] )
if field.default is not dataclasses.MISSING:
_lowerCAmelCase = field.default
else:
_lowerCAmelCase = True
elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
_lowerCAmelCase = copy(lowerCamelCase )
# Hack because type=bool in argparse does not behave as we want.
_lowerCAmelCase = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
_lowerCAmelCase = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
_lowerCAmelCase = default
# This tells argparse we accept 0 or 1 value after --field_name
_lowerCAmelCase = """?"""
# This is the value that will get picked if we do --field_name (without value)
_lowerCAmelCase = True
elif isclass(lowerCamelCase ) and issubclass(lowerCamelCase , lowerCamelCase ):
_lowerCAmelCase = field.type.__args__[0]
_lowerCAmelCase = """+"""
if field.default_factory is not dataclasses.MISSING:
_lowerCAmelCase = field.default_factory()
elif field.default is dataclasses.MISSING:
_lowerCAmelCase = True
else:
_lowerCAmelCase = field.type
if field.default is not dataclasses.MISSING:
_lowerCAmelCase = field.default
elif field.default_factory is not dataclasses.MISSING:
_lowerCAmelCase = field.default_factory()
else:
_lowerCAmelCase = True
parser.add_argument(lowerCamelCase , *lowerCamelCase , **lowerCamelCase )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
_lowerCAmelCase = False
parser.add_argument(f"""--no_{field.name}""" , action="""store_false""" , dest=field.name , **lowerCamelCase )
def A__ (self , lowerCamelCase ):
'''simple docstring'''
if hasattr(lowerCamelCase , """_argument_group_name""" ):
_lowerCAmelCase = self.add_argument_group(dtype._argument_group_name )
else:
_lowerCAmelCase = self
try:
_lowerCAmelCase = get_type_hints(lowerCamelCase )
except NameError:
raise RuntimeError(
f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
"""removing line of `from __future__ import annotations` which opts in Postponed """
"""Evaluation of Annotations (PEP 563)""" )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(lowerCamelCase ):
_lowerCAmelCase = """.""".join(map(lowerCamelCase , sys.version_info[:3] ) )
raise RuntimeError(
f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
"""line of `from __future__ import annotations` which opts in union types as """
"""`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To """
"""support Python versions that lower than 3.10, you need to use """
"""`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of """
"""`X | None`.""" ) from ex
raise
for field in dataclasses.fields(lowerCamelCase ):
if not field.init:
continue
_lowerCAmelCase = type_hints[field.name]
self._parse_dataclass_field(lowerCamelCase , lowerCamelCase )
def A__ (self , lowerCamelCase=None , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=None , ):
'''simple docstring'''
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
_lowerCAmelCase = []
if args_filename:
args_files.append(Path(lowerCamelCase ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix(""".args""" ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
_lowerCAmelCase = ArgumentParser()
args_file_parser.add_argument(lowerCamelCase , type=lowerCamelCase , action="""append""" )
# Use only remaining args for further parsing (remove the args_file_flag)
_lowerCAmelCase , _lowerCAmelCase = args_file_parser.parse_known_args(args=lowerCamelCase )
_lowerCAmelCase = vars(lowerCamelCase ).get(args_file_flag.lstrip("""-""" ) , lowerCamelCase )
if cmd_args_file_paths:
args_files.extend([Path(lowerCamelCase ) for p in cmd_args_file_paths] )
_lowerCAmelCase = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
_lowerCAmelCase = file_args + args if args is not None else file_args + sys.argv[1:]
_lowerCAmelCase , _lowerCAmelCase = self.parse_known_args(args=lowerCamelCase )
_lowerCAmelCase = []
for dtype in self.dataclass_types:
_lowerCAmelCase = {f.name for f in dataclasses.fields(lowerCamelCase ) if f.init}
_lowerCAmelCase = {k: v for k, v in vars(lowerCamelCase ).items() if k in keys}
for k in keys:
delattr(lowerCamelCase , lowerCamelCase )
_lowerCAmelCase = dtype(**lowerCamelCase )
outputs.append(lowerCamelCase )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(lowerCamelCase )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
def A__ (self , lowerCamelCase , lowerCamelCase = False ):
'''simple docstring'''
_lowerCAmelCase = set(args.keys() )
_lowerCAmelCase = []
for dtype in self.dataclass_types:
_lowerCAmelCase = {f.name for f in dataclasses.fields(lowerCamelCase ) if f.init}
_lowerCAmelCase = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
_lowerCAmelCase = dtype(**lowerCamelCase )
outputs.append(lowerCamelCase )
if not allow_extra_keys and unused_keys:
raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(lowerCamelCase )}""" )
return tuple(lowerCamelCase )
def A__ (self , lowerCamelCase , lowerCamelCase = False ):
'''simple docstring'''
with open(Path(lowerCamelCase ) , encoding="""utf-8""" ) as open_json_file:
_lowerCAmelCase = json.loads(open_json_file.read() )
_lowerCAmelCase = self.parse_dict(lowerCamelCase , allow_extra_keys=lowerCamelCase )
return tuple(lowerCamelCase )
def A__ (self , lowerCamelCase , lowerCamelCase = False ):
'''simple docstring'''
_lowerCAmelCase = self.parse_dict(yaml.safe_load(Path(lowerCamelCase ).read_text() ) , allow_extra_keys=lowerCamelCase )
return tuple(lowerCamelCase )
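# Usage sketch (the error messages above identify this class as transformers'
# HfArgumentParser; the dataclass and method names below follow that upstream
# API and are assumptions here, since identifiers in this dump are obfuscated):
#
#   @dataclasses.dataclass
#   class TrainingArguments:
#       learning_rate: float = 3e-4
#       fp16: bool = False
#
#   parser = HfArgumentParser(TrainingArguments)
#   (training_args,) = parser.parse_args_into_dataclasses()
#   # booleans that default to True automatically get a matching --no_* complement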
| 156
| 0
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger()
def UpperCamelCase ( _lowerCAmelCase : int , _lowerCAmelCase : str , _lowerCAmelCase : LevitConfig , _lowerCAmelCase : Path , _lowerCAmelCase : bool = True ):
print(f"""Converting {name}...""" )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
__a = timm.create_model("""levit_128s""" , pretrained=_lowerCAmelCase )
else:
__a = timm.create_model("""levit_128""" , pretrained=_lowerCAmelCase )
if hidden_sizes == 192:
__a = timm.create_model("""levit_192""" , pretrained=_lowerCAmelCase )
if hidden_sizes == 256:
__a = timm.create_model("""levit_256""" , pretrained=_lowerCAmelCase )
if hidden_sizes == 384:
__a = timm.create_model("""levit_384""" , pretrained=_lowerCAmelCase )
from_model.eval()
__a = LevitForImageClassificationWithTeacher(_lowerCAmelCase ).eval()
__a = OrderedDict()
__a = from_model.state_dict()
__a = list(from_model.state_dict().keys() )
__a = list(our_model.state_dict().keys() )
print(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) )
for i in range(len(_lowerCAmelCase ) ):
__a = weights[og_keys[i]]
our_model.load_state_dict(_lowerCAmelCase )
__a = torch.randn((2, 3, 224, 224) )
__a = from_model(_lowerCAmelCase )
__a = our_model(_lowerCAmelCase ).logits
assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase ), "The model logits don't match the original one."
__a = name
print(_lowerCAmelCase )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
__a = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(f"""Pushed {checkpoint_name}""" )
def UpperCamelCase ( _lowerCAmelCase : Path , _lowerCAmelCase : str = None , _lowerCAmelCase : bool = True ):
__a = """imagenet-1k-id2label.json"""
__a = 1000
__a = (1, num_labels)
__a = """huggingface/label-files"""
__a = num_labels
__a = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
__a = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
__a = partial(_lowerCAmelCase , num_labels=_lowerCAmelCase , idalabel=_lowerCAmelCase , labelaid=_lowerCAmelCase )
__a = {
"""levit-128S""": 128,
"""levit-128""": 128,
"""levit-192""": 192,
"""levit-256""": 256,
"""levit-384""": 384,
}
__a = {
"""levit-128S""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-128""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-192""": ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-256""": ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-384""": ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , _lowerCAmelCase , names_to_config[model_name] , _lowerCAmelCase , _lowerCAmelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return config, expected_shape
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""levit-dump-folder/""",
type=Path,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
__A = parser.parse_args()
__A = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
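# Usage sketch (the script filename is hypothetical; the model names and flags come
# from the names_to_hidden_sizes mapping and the argparse block above):
#   python convert_levit_checkpoint.py \
#       --model_name levit-128S \
#       --pytorch_dump_folder_path levit-dump-folder/ \
#       --no-push_to_hub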
| 173
|
"""simple docstring"""
def UpperCamelCase ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] ):
# "extended trapezoidal rule"
    # int(f) ~= dx/2 * (f_1 + 2*f_2 + ... + 2*f_(n-1) + f_n)
__a = (boundary[1] - boundary[0]) / steps
__a = boundary[0]
__a = boundary[1]
__a = make_points(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__a = 0.0
y += (h / 2.0) * f(_lowerCAmelCase )
for i in x_i:
# print(i)
y += h * f(_lowerCAmelCase )
y += (h / 2.0) * f(_lowerCAmelCase )
return y
def UpperCamelCase ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : int ):
__a = a + h
while x < (b - h):
yield x
__a = x + h
def UpperCamelCase ( _lowerCAmelCase : int ): # enter your function here
__a = (x - 0) * (x - 0)
return y
def UpperCamelCase ( ):
__a = 0.0 # Lower bound of integration
__a = 1.0 # Upper bound of integration
__a = 10.0 # define number of steps or resolution
__a = [a, b] # define boundary of integration
__a = method_a(_lowerCAmelCase , _lowerCAmelCase )
print(f"""y = {y}""" )
if __name__ == "__main__":
main()
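# Worked check (a sketch): for f(x) = x^2 on [0, 1], the exact integral is 1/3.
# With steps = 10 (h = 0.1) the composite trapezoidal rule computed above gives
#   y = h/2 * (f(0) + f(1)) + h * (f(0.1) + ... + f(0.9))
#     = 0.05 * 1 + 0.1 * 2.85 = 0.335,
# overestimating 1/3 by roughly (b - a) * h^2 * f''/12 = 0.01 * 2 / 12 ~= 0.00167,
# which matches the printed value up to floating-point rounding.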
| 173
| 1
|
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : int = 1
SCREAMING_SNAKE_CASE : Dict = 2
while i * i <= n:
SCREAMING_SNAKE_CASE : Optional[int] = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def A ( ):
SCREAMING_SNAKE_CASE : List[Any] = 1
SCREAMING_SNAKE_CASE : List[str] = 1
while True:
i += 1
t_num += i
if count_divisors(_lowercase ) > 500:
break
return t_num
if __name__ == "__main__":
print(solution())
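# Worked example (a sketch): for n = 28 = 2^2 * 7, the divisor-counting loop above
# yields (2 + 1) * (1 + 1) = 6, matching 28's divisors 1, 2, 4, 7, 14, 28. 28 is
# also the first triangular number with more than 5 divisors; the solution runs
# the same search with a threshold of 500 (Project Euler 12).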
| 248
|
def A ( _lowercase = 10**9 ):
SCREAMING_SNAKE_CASE : int = 1
SCREAMING_SNAKE_CASE : str = 2
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
SCREAMING_SNAKE_CASE : List[Any] = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
| 248
| 1
|
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_lowerCamelCase : Dict = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase)
class lowercase ( __UpperCAmelCase):
def __init__( self : str , **_lowerCamelCase : Dict ):
"""simple docstring"""
super().__init__(**_lowerCamelCase )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Any , _lowerCamelCase : Union[str, List[str], "Image", List["Image"]] , **_lowerCamelCase : Optional[Any] ):
"""simple docstring"""
return super().__call__(_lowerCamelCase , **_lowerCamelCase )
def a_ ( self : List[str] , **_lowerCamelCase : str ):
"""simple docstring"""
A_ : List[str] = {}
if "candidate_labels" in kwargs:
A_ : int = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
A_ : List[Any] = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def a_ ( self : int , _lowerCamelCase : Any , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Any="This is a photo of {}." ):
"""simple docstring"""
A_ : Dict = load_image(_lowerCamelCase )
A_ : Union[str, Any] = self.image_processor(images=[image] , return_tensors=self.framework )
A_ : Union[str, Any] = candidate_labels
A_ : Optional[Any] = [hypothesis_template.format(_lowerCamelCase ) for x in candidate_labels]
A_ : List[Any] = self.tokenizer(_lowerCamelCase , return_tensors=self.framework , padding=_lowerCamelCase )
A_ : int = [text_inputs]
return inputs
def a_ ( self : Union[str, Any] , _lowerCamelCase : Tuple ):
"""simple docstring"""
A_ : Tuple = model_inputs.pop('''candidate_labels''' )
A_ : Optional[Any] = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , _lowerCamelCase ):
A_ : List[Any] = text_inputs[0]
else:
# Batching case.
A_ : int = text_inputs[0][0]
A_ : int = self.model(**_lowerCamelCase , **_lowerCamelCase )
A_ : List[str] = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def a_ ( self : Tuple , _lowerCamelCase : Any ):
"""simple docstring"""
A_ : str = model_outputs.pop('''candidate_labels''' )
A_ : List[str] = model_outputs['''logits'''][0]
if self.framework == "pt":
A_ : List[Any] = logits.softmax(dim=-1 ).squeeze(-1 )
A_ : Tuple = probs.tolist()
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
A_ : Union[str, Any] = [scores]
elif self.framework == "tf":
A_ : List[str] = stable_softmax(_lowerCamelCase , axis=-1 )
A_ : List[Any] = probs.numpy().tolist()
else:
raise ValueError(F"""Unsupported framework: {self.framework}""" )
A_ : Optional[Any] = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(_lowerCamelCase , _lowerCamelCase ) , key=lambda _lowerCamelCase : -x[0] )
]
return result
| 361
|
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def lowercase_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : int = namedtuple('''result''' , '''name value''' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
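# Usage sketch (illustrative values; the function name is obfuscated above, but
# its behavior follows the electrical power relation P = V * I):
#   fn(voltage=0, current=2, power=4)  # -> result(name='voltage', value=2.0)
#   fn(voltage=2, current=0, power=4)  # -> result(name='current', value=2.0)
#   fn(voltage=2, current=2, power=0)  # -> result(name='power', value=4.0)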
| 361
| 1
|
from collections import deque
class a__ :
def __init__( self : List[str],_A : str,_A : int,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = process_name # process name
SCREAMING_SNAKE_CASE_ : Optional[Any] = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
SCREAMING_SNAKE_CASE_ : Optional[int] = arrival_time
SCREAMING_SNAKE_CASE_ : Tuple = burst_time # remaining burst time
SCREAMING_SNAKE_CASE_ : Optional[int] = 0 # total time of the process wait in ready queue
SCREAMING_SNAKE_CASE_ : int = 0 # time from arrival time to completion time
class a__ :
def __init__( self : int,_A : int,_A : list[int],_A : deque[Process],_A : int,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = number_of_queues
# time slice of queues that round robin algorithm applied
SCREAMING_SNAKE_CASE_ : Dict = time_slices
# unfinished process is in this ready_queue
SCREAMING_SNAKE_CASE_ : Any = queue
# current time
SCREAMING_SNAKE_CASE_ : Union[str, Any] = current_time
# finished process is in this sequence queue
SCREAMING_SNAKE_CASE_ : deque[Process] = deque()
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def __UpperCamelCase ( self : List[str],_A : list[Process] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = []
for i in range(len(_A ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def __UpperCamelCase ( self : List[str],_A : list[Process] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for i in range(len(_A ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def __UpperCamelCase ( self : Dict,_A : list[Process] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = []
for i in range(len(_A ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def __UpperCamelCase ( self : Any,_A : deque[Process] ):
"""simple docstring"""
return [q.burst_time for q in queue]
def __UpperCamelCase ( self : Any,_A : Process ):
"""simple docstring"""
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def __UpperCamelCase ( self : Optional[int],_A : deque[Process] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : deque[Process] = deque() # sequence deque of finished process
while len(_A ) != 0:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(_A )
# update current time
self.current_time += cp.burst_time
            # finish the process and set the process's burst time to 0
SCREAMING_SNAKE_CASE_ : Any = 0
# set the process's turnaround time because it is finished
SCREAMING_SNAKE_CASE_ : Optional[int] = self.current_time - cp.arrival_time
# set the completion time
SCREAMING_SNAKE_CASE_ : str = self.current_time
# add the process to queue that has finished queue
finished.append(_A )
self.finish_queue.extend(_A ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def __UpperCamelCase ( self : Optional[Any],_A : deque[Process],_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : deque[Process] = deque() # sequence deque of terminated process
        # run only one round-robin cycle; unfinished processes go back to the ready queue
for _ in range(len(_A ) ):
SCREAMING_SNAKE_CASE_ : Any = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(_A )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
SCREAMING_SNAKE_CASE_ : int = self.current_time
                # put the process at the back of the queue because it is not finished
ready_queue.append(_A )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
SCREAMING_SNAKE_CASE_ : List[str] = 0
# set the finish time
SCREAMING_SNAKE_CASE_ : int = self.current_time
# update the process' turnaround time because it is finished
SCREAMING_SNAKE_CASE_ : str = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(_A )
self.finish_queue.extend(_A ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
for i in range(self.number_of_queues - 1 ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.round_robin(
self.ready_queue,self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
__lowerCamelCase : List[Any] = Process('''P1''', 0, 53)
__lowerCamelCase : Optional[Any] = Process('''P2''', 0, 17)
__lowerCamelCase : Dict = Process('''P3''', 0, 68)
__lowerCamelCase : List[Any] = Process('''P4''', 0, 24)
__lowerCamelCase : Tuple = 3
__lowerCamelCase : Dict = [17, 25]
__lowerCamelCase : str = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
__lowerCamelCase : Any = Process('''P1''', 0, 53)
__lowerCamelCase : int = Process('''P2''', 0, 17)
__lowerCamelCase : Any = Process('''P3''', 0, 68)
__lowerCamelCase : List[str] = Process('''P4''', 0, 24)
__lowerCamelCase : Any = 3
__lowerCamelCase : int = [17, 25]
__lowerCamelCase : Optional[int] = deque([Pa, Pa, Pa, Pa])
__lowerCamelCase : Optional[Any] = MLFQ(number_of_queues, time_slices, queue, 0)
__lowerCamelCase : List[Any] = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f'''waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print completion times of processes(P1, P2, P3, P4)
print(
f'''completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f'''turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print sequence of finished processes
print(
f'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
)
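# Reading the demo above (a sketch of the expected flow): with time slices
# [17, 25] and 3 queues, queue 0 runs each process round-robin for up to 17
# time units, survivors get up to 25 units in queue 1, and anything still
# unfinished is drained first-come-first-served in the final queue. P2
# (burst 17) therefore completes during the first round-robin pass, while
# P3 (burst 68) only finishes in the FCFS stage.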
| 216
|
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load Python 2 dataset pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
__lowerCamelCase : Union[str, Any] = data_utils.TransfoXLTokenizer
__lowerCamelCase : Union[str, Any] = data_utils.TransfoXLCorpus
__lowerCamelCase : Optional[Any] = data_utils
__lowerCamelCase : Optional[int] = data_utils
def _snake_case ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : Any ):
"""simple docstring"""
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(lowerCAmelCase , "rb" ) as fp:
SCREAMING_SNAKE_CASE_ : str = pickle.load(lowerCAmelCase , encoding="latin1" )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
SCREAMING_SNAKE_CASE_ : Dict = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
print(f'Save vocabulary to {pytorch_vocab_dump_path}' )
SCREAMING_SNAKE_CASE_ : int = corpus.vocab.__dict__
torch.save(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = corpus.__dict__
corpus_dict_no_vocab.pop("vocab" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = pytorch_dump_folder_path + "/" + CORPUS_NAME
print(f'Save dataset to {pytorch_dataset_dump_path}' )
torch.save(lowerCAmelCase , lowerCAmelCase )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
SCREAMING_SNAKE_CASE_ : List[str] = os.path.abspath(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = os.path.abspath(lowerCAmelCase )
print(f'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TransfoXLConfig()
else:
SCREAMING_SNAKE_CASE_ : Dict = TransfoXLConfig.from_json_file(lowerCAmelCase )
print(f'Building PyTorch model from configuration: {config}' )
SCREAMING_SNAKE_CASE_ : int = TransfoXLLMHeadModel(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = load_tf_weights_in_transfo_xl(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Save pytorch-model
SCREAMING_SNAKE_CASE_ : str = os.path.join(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = os.path.join(lowerCAmelCase , lowerCAmelCase )
print(f'Save PyTorch model to {os.path.abspath(lowerCAmelCase )}' )
torch.save(model.state_dict() , lowerCAmelCase )
print(f'Save configuration file to {os.path.abspath(lowerCAmelCase )}' )
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__lowerCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
__lowerCamelCase : Dict = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
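# Usage sketch (the script filename and paths are hypothetical; the flags match
# the argparse definitions above):
#   python convert_transfo_xl_checkpoint.py \
#       --pytorch_dump_folder_path ./transfo-xl-dump \
#       --tf_checkpoint_path ./tf_ckpt/model.ckpt \
#       --transfo_xl_config_file ./tf_ckpt/config.json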
| 216
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_snake_case = logging.get_logger(__name__)
def __snake_case ( SCREAMING_SNAKE_CASE: Optional[int] ):
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(SCREAMING_SNAKE_CASE ):
return [[videos]]
raise ValueError(f"""Could not make batched video from {videos}""" )
class _SCREAMING_SNAKE_CASE ( UpperCAmelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] = ["pixel_values"]
def __init__( self : Dict , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 255 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase_ : Optional[int] , ) -> None:
"""simple docstring"""
super().__init__(**UpperCAmelCase_ )
_lowerCAmelCase = size if size is not None else {'shortest_edge': 224}
_lowerCAmelCase = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_ )
_lowerCAmelCase = crop_size if crop_size is not None else {'height': 224, 'width': 224}
_lowerCAmelCase = get_size_dict(UpperCAmelCase_ , param_name='crop_size' )
_lowerCAmelCase = do_resize
_lowerCAmelCase = size
_lowerCAmelCase = do_center_crop
_lowerCAmelCase = crop_size
_lowerCAmelCase = resample
_lowerCAmelCase = do_rescale
_lowerCAmelCase = rescale_factor
_lowerCAmelCase = do_normalize
_lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowerCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCamelCase ( self : str , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ) -> np.ndarray:
"""simple docstring"""
_lowerCAmelCase = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_ )
if "shortest_edge" in size:
_lowerCAmelCase = get_resize_output_image_size(UpperCAmelCase_ , size['shortest_edge'] , default_to_square=UpperCAmelCase_ )
elif "height" in size and "width" in size:
_lowerCAmelCase = (size['height'], size['width'])
else:
raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def __lowerCamelCase ( self : Optional[Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Tuple , ) -> np.ndarray:
"""simple docstring"""
_lowerCAmelCase = get_size_dict(UpperCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(UpperCAmelCase_ , size=(size['height'], size['width']) , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def __lowerCamelCase ( self : Optional[int] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[int, float] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : int , ) -> Any:
"""simple docstring"""
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def __lowerCamelCase ( self : Optional[int] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_ )
def __lowerCamelCase ( self : Tuple , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : float = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
"""simple docstring"""
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
_lowerCAmelCase = to_numpy_array(UpperCAmelCase_ )
if do_resize:
_lowerCAmelCase = self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ )
if do_center_crop:
_lowerCAmelCase = self.center_crop(UpperCAmelCase_ , size=UpperCAmelCase_ )
if do_rescale:
_lowerCAmelCase = self.rescale(image=UpperCAmelCase_ , scale=UpperCAmelCase_ )
if do_normalize:
_lowerCAmelCase = self.normalize(image=UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ )
_lowerCAmelCase = to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_ )
return image
def __lowerCamelCase ( self : int , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : float = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase_ : int , ) -> PIL.Image.Image:
"""simple docstring"""
_lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase = resample if resample is not None else self.resample
_lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase = image_std if image_std is not None else self.image_std
_lowerCAmelCase = size if size is not None else self.size
_lowerCAmelCase = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_ )
_lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
_lowerCAmelCase = get_size_dict(UpperCAmelCase_ , param_name='crop_size' )
if not valid_images(UpperCAmelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
_lowerCAmelCase = make_batched(UpperCAmelCase_ )
_lowerCAmelCase = [
[
self._preprocess_image(
image=UpperCAmelCase_ , do_resize=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ , do_center_crop=UpperCAmelCase_ , crop_size=UpperCAmelCase_ , do_rescale=UpperCAmelCase_ , rescale_factor=UpperCAmelCase_ , do_normalize=UpperCAmelCase_ , image_mean=UpperCAmelCase_ , image_std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , )
for img in video
]
for video in videos
]
_lowerCAmelCase = {'pixel_values': videos}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_ )
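# Usage sketch (the class name is obfuscated above; treating it as a
# VideoMAE-style video image processor from transformers is an assumption):
#   processor = SomeVideoImageProcessor(size={"shortest_edge": 224})
#   batch = processor(videos, return_tensors="pt")
#   batch["pixel_values"]  # per video: frames resized, center-cropped to
#                          # 224x224, rescaled by 1/255 and normalized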
| 491
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int]=13 , UpperCAmelCase_ : Tuple=7 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Dict=99 , UpperCAmelCase_ : Tuple=32 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : Any=37 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : int=512 , UpperCAmelCase_ : str=16 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Union[str, Any]=0.02 , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Optional[int]="None" , UpperCAmelCase_ : List[Any]=3 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : Tuple=None , ) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = seq_length
_lowerCAmelCase = is_training
_lowerCAmelCase = use_input_mask
_lowerCAmelCase = use_token_type_ids
_lowerCAmelCase = use_labels
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = num_labels
_lowerCAmelCase = num_choices
_lowerCAmelCase = relative_attention
_lowerCAmelCase = position_biased_input
_lowerCAmelCase = pos_att_type
_lowerCAmelCase = scope
def __lowerCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = None
if self.use_input_mask:
_lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase = None
if self.use_token_type_ids:
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=UpperCAmelCase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple ) -> int:
"""simple docstring"""
_lowerCAmelCase = TFDebertaVaModel(config=UpperCAmelCase_ )
_lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowerCAmelCase = [input_ids, input_mask]
_lowerCAmelCase = model(UpperCAmelCase_ )
_lowerCAmelCase = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any ) -> Optional[int]:
"""simple docstring"""
_lowerCAmelCase = TFDebertaVaForMaskedLM(config=UpperCAmelCase_ )
_lowerCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_lowerCAmelCase = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int ) -> List[str]:
"""simple docstring"""
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = TFDebertaVaForSequenceClassification(config=UpperCAmelCase_ )
_lowerCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_lowerCAmelCase = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] ) -> Tuple:
"""simple docstring"""
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = TFDebertaVaForTokenClassification(config=UpperCAmelCase_ )
_lowerCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_lowerCAmelCase = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] ) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase = TFDebertaVaForQuestionAnswering(config=UpperCAmelCase_ )
_lowerCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_lowerCAmelCase = model(UpperCAmelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs
_lowerCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE_: str = (
{
"feature-extraction": TFDebertaVaModel,
"fill-mask": TFDebertaVaForMaskedLM,
"question-answering": TFDebertaVaForQuestionAnswering,
"text-classification": TFDebertaVaForSequenceClassification,
"token-classification": TFDebertaVaForTokenClassification,
"zero-shot": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE_: str = False
SCREAMING_SNAKE_CASE_: List[str] = False
def __lowerCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
_lowerCAmelCase = TFDebertaVaModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def __lowerCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def __lowerCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def __lowerCamelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
def __lowerCamelCase ( self : Any ) -> int:
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ )
def __lowerCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ )
@slow
def __lowerCamelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
_lowerCAmelCase = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
self.assertIsNotNone(UpperCAmelCase_ )
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason='Model not available yet' )
def __lowerCamelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
pass
@slow
def __lowerCamelCase ( self : Any ) -> int:
"""simple docstring"""
_lowerCAmelCase = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
_lowerCAmelCase = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
_lowerCAmelCase = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_lowerCAmelCase = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )[0]
_lowerCAmelCase = tf.constant(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , UpperCAmelCase_ , atol=1E-4 )
| 491
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 621
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
SCREAMING_SNAKE_CASE_: Optional[Any] = StableDiffusionInpaintPipeline
SCREAMING_SNAKE_CASE_: List[str] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
SCREAMING_SNAKE_CASE_: Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE_: Any = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
SCREAMING_SNAKE_CASE_: Any = frozenset([] )
def A ( self ) -> Optional[Any]:
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE : Tuple = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowerCAmelCase_ , )
_SCREAMING_SNAKE_CASE : List[str] = PNDMScheduler(skip_prk_steps=lowerCAmelCase_ )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE : Dict = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , )
_SCREAMING_SNAKE_CASE : Dict = CLIPTextModel(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_SCREAMING_SNAKE_CASE : Tuple = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def A ( self , lowerCAmelCase_ , lowerCAmelCase_=0 ) -> Optional[int]:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
_SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowerCAmelCase_ ) ).to(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_SCREAMING_SNAKE_CASE : Any = Image.fromarray(np.uinta(lowerCAmelCase_ ) ).convert('RGB' ).resize((6_4, 6_4) )
_SCREAMING_SNAKE_CASE : Optional[Any] = Image.fromarray(np.uinta(image + 4 ) ).convert('RGB' ).resize((6_4, 6_4) )
if str(lowerCAmelCase_ ).startswith('mps' ):
_SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(lowerCAmelCase_ )
else:
_SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Optional[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def A ( self ) -> str:
_SCREAMING_SNAKE_CASE : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_SCREAMING_SNAKE_CASE : str = self.get_dummy_components()
_SCREAMING_SNAKE_CASE : Any = StableDiffusionInpaintPipeline(**lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : List[str] = sd_pipe.to(lowerCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(**lowerCAmelCase_ ).images
_SCREAMING_SNAKE_CASE : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_SCREAMING_SNAKE_CASE : List[str] = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self ) -> str:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def A ( self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
_SCREAMING_SNAKE_CASE : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
_SCREAMING_SNAKE_CASE : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy' )
_SCREAMING_SNAKE_CASE : Optional[Any] = 'stabilityai/stable-diffusion-2-inpainting'
_SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase_ , safety_checker=lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
_SCREAMING_SNAKE_CASE : List[str] = 'Face of a yellow cat, high resolution, sitting on a park bench'
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE : List[Any] = pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , mask_image=lowerCAmelCase_ , generator=lowerCAmelCase_ , output_type='np' , )
_SCREAMING_SNAKE_CASE : int = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def A ( self ) -> int:
_SCREAMING_SNAKE_CASE : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
_SCREAMING_SNAKE_CASE : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
_SCREAMING_SNAKE_CASE : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
_SCREAMING_SNAKE_CASE : Optional[Any] = 'stabilityai/stable-diffusion-2-inpainting'
_SCREAMING_SNAKE_CASE : str = StableDiffusionInpaintPipeline.from_pretrained(
lowerCAmelCase_ , torch_dtype=torch.floataa , safety_checker=lowerCAmelCase_ , )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing()
_SCREAMING_SNAKE_CASE : str = 'Face of a yellow cat, high resolution, sitting on a park bench'
_SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE : List[Any] = pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , mask_image=lowerCAmelCase_ , generator=lowerCAmelCase_ , output_type='np' , )
_SCREAMING_SNAKE_CASE : Optional[int] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def A ( self ) -> List[str]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_SCREAMING_SNAKE_CASE : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
_SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
_SCREAMING_SNAKE_CASE : str = 'stabilityai/stable-diffusion-2-inpainting'
_SCREAMING_SNAKE_CASE : List[Any] = PNDMScheduler.from_pretrained(lowerCAmelCase_ , subfolder='scheduler' )
_SCREAMING_SNAKE_CASE : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
lowerCAmelCase_ , safety_checker=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , torch_dtype=torch.floataa , )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_SCREAMING_SNAKE_CASE : str = 'Face of a yellow cat, high resolution, sitting on a park bench'
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE : Dict = pipe(
prompt=lowerCAmelCase_ , image=lowerCAmelCase_ , mask_image=lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=2 , output_type='np' , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9
| 621
| 1
|
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
__snake_case = """"""
if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""):
class _lowerCAmelCase ( tr.AbstractTransform ):
def __init__( self , UpperCamelCase__ = " " ) -> Any:
'''simple docstring'''
snake_case : Optional[Any] = sentence_delimiter
def lowerCamelCase ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
return list(UpperCamelCase__ )
def lowerCamelCase ( self , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
snake_case : Dict = []
for sent_idx, sentence in enumerate(UpperCamelCase__ ):
chars.extend(self.process_string(UpperCamelCase__ ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(UpperCamelCase__ ) - 1:
chars.append(self.sentence_delimiter )
return chars
__snake_case = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
__snake_case = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
__snake_case = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
__snake_case = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on characters instead of words. Please refer to the docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system, with a CER of 0 being a perfect score.
"""
__snake_case = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcriptions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation; set to True for a more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
def lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
"https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
] , )
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ) -> Optional[int]:
'''simple docstring'''
if concatenate_texts:
return jiwer.compute_measures(
UpperCamelCase__ , UpperCamelCase__ , truth_transform=UpperCamelCase__ , hypothesis_transform=UpperCamelCase__ , )["wer"]
snake_case : Optional[int] = 0
snake_case : int = 0
for prediction, reference in zip(UpperCamelCase__ , UpperCamelCase__ ):
snake_case : Dict = jiwer.compute_measures(
UpperCamelCase__ , UpperCamelCase__ , truth_transform=UpperCamelCase__ , hypothesis_transform=UpperCamelCase__ , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
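# A hand-worked instance of the formula documented above (illustrative strings,
# not part of the metric): comparing the prediction "abxd" against the reference
# "abcd" gives S=1, D=0, I=0, C=3 and N=4, so CER = (1 + 0 + 0) / 4 = 0.25.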
| 117
|
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working, simple example of how to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__snake_case = 16
__snake_case = 32
def __lowerCAmelCase ( lowercase : Accelerator , lowercase : int = 16 ) -> Union[str, Any]:
"""simple docstring"""
snake_case : int = AutoTokenizer.from_pretrained("bert-base-cased" )
snake_case : str = load_dataset("glue" , "mrpc" )
def tokenize_function(lowercase : Tuple ):
# max_length=None => use the model max length (it's actually the default)
snake_case : List[str] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowercase , max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
snake_case : Any = datasets.map(
lowercase , batched=lowercase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case : Tuple = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(lowercase : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
snake_case : Any = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
snake_case : str = 16
elif accelerator.mixed_precision != "no":
snake_case : List[Any] = 8
else:
snake_case : Union[str, Any] = None
return tokenizer.pad(
lowercase , padding="longest" , max_length=lowercase , pad_to_multiple_of=lowercase , return_tensors="pt" , )
# Instantiate dataloaders.
snake_case : Any = DataLoader(
tokenized_datasets["train"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
snake_case : List[str] = DataLoader(
tokenized_datasets["validation"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__snake_case = mocked_dataloaders # noqa: F811
def __lowerCAmelCase ( lowercase : Dict , lowercase : int ) -> Tuple:
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowercase ) == "1":
snake_case : Union[str, Any] = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, it should be passed in here, for example:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
snake_case : str = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
snake_case : Optional[int] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case : str = config["lr"]
snake_case : Dict = int(config["num_epochs"] )
snake_case : int = int(config["seed"] )
snake_case : Tuple = int(config["batch_size"] )
set_seed(lowercase )
snake_case ,snake_case : List[Any] = get_dataloaders(lowercase , lowercase )
snake_case : Optional[Any] = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
snake_case : int = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
snake_case : Any = batch_size // MAX_GPU_BATCH_SIZE
snake_case : int = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case : List[Any] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case : str = model.to(accelerator.device )
# Instantiate optimizer
snake_case : Tuple = AdamW(params=model.parameters() , lr=lowercase )
# Instantiate scheduler
snake_case : Optional[Any] = get_linear_schedule_with_warmup(
optimizer=lowercase , num_warmup_steps=100 , num_training_steps=(len(lowercase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case ,snake_case ,snake_case ,snake_case ,snake_case : Union[str, Any] = accelerator.prepare(
lowercase , lowercase , lowercase , lowercase , lowercase )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
snake_case : str = os.path.split(lowercase )[-1].split("." )[0]
accelerator.init_trackers(lowercase , lowercase )
# Now we train the model
for epoch in range(lowercase ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
snake_case : Any = 0
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
snake_case : Dict = model(**lowercase )
snake_case : int = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
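# Divide the loss so that the gradients accumulated over
# `gradient_accumulation_steps` micro-batches sum to the gradient of one
# effective batch before the optimizer step below.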
snake_case : Union[str, Any] = loss / gradient_accumulation_steps
accelerator.backward(lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
snake_case : Optional[int] = model(**lowercase )
snake_case : Tuple = outputs.logits.argmax(dim=-1 )
snake_case ,snake_case : List[str] = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=lowercase , references=lowercase , )
snake_case : Optional[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , lowercase )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"accuracy": eval_metric["accuracy"],
"f1": eval_metric["f1"],
"train_loss": total_loss.item() / len(lowercase ),
"epoch": epoch,
} , step=lowercase , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def __lowerCAmelCase ( ) -> str:
"""simple docstring"""
snake_case : Union[str, Any] = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=lowercase , default=lowercase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
parser.add_argument(
"--project_dir" , type=lowercase , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
snake_case : int = parser.parse_args()
snake_case : List[str] = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(lowercase , lowercase )
if __name__ == "__main__":
main()
| 117
| 1
|
def UpperCamelCase ( _UpperCAmelCase : str ) -> list:
'''simple docstring'''
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(_UpperCAmelCase ) )
if txt[a].isalpha()
]
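# Worked example: for txt = "ab" this returns ["Ab", "aB"] -- one copy of the
# string per alphabetic position, with that single position upper-cased.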
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 461
|
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class __lowercase :
_A = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be trained."} )
_A = field(
default="./" , metadata={"help": "Save dir where model repo is cloned and models updates are saved to."} )
_A = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path of training dataset."} )
_A = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
_A = field(default=2 , metadata={"help": "Batch size for training."} )
_A = field(default=2 , metadata={"help": "Batch size for evaluation."} )
_A = field(default=0.1 , metadata={"help": "Value of weight decay."} )
_A = field(
default=10000 , metadata={"help": "Size of buffer used to shuffle streaming dataset."} )
_A = field(default=2e-4 , metadata={"help": "Learning rate for training."} )
_A = field(default="cosine" , metadata={"help": "Learning rate."} )
_A = field(
default=750 , metadata={"help": "Number of warmup steps in the learning rate schedule."} )
_A = field(
default=16 , metadata={"help": "Number of gradient accumulation steps."} )
_A = field(
default=__snake_case , metadata={"help": "Use gradient checkpointing to reduce memory footprint."} )
_A = field(default=50000 , metadata={"help": "Maximum number of training steps."} )
_A = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
_A = field(default=1024 , metadata={"help": "Sequence lengths used for training."} )
_A = field(default=1 , metadata={"help": "Training seed."} )
_A = field(
default=1024 , metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."} , )
_A = field(
default=__snake_case , metadata={"help": "States path if the training should continue from a checkpoint folder."} )
_A = field(default=__snake_case , metadata={"help": "If True the data is pretokenized."} )
@dataclass
class __lowercase :
_A = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
_A = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
_A = field(default=2 , metadata={"help": "Batch size used for evaluation."} )
_A = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
_A = field(default=1024 , metadata={"help": "Length of sequences to be evaluated."} )
_A = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
@dataclass
class __lowercase :
_A = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
_A = field(default=__snake_case , metadata={"help": "Number of workers used for code evaluation."} )
_A = field(
default=__snake_case , metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."} , )
_A = field(
default=__snake_case , metadata={"help": "Sample from the language model's output distribution."} )
_A = field(default=0.2 , metadata={"help": "Sampling temperature used for generation."} )
_A = field(default=256 , metadata={"help": "Maximum number of newly generated tokens."} )
_A = field(default=0 , metadata={"help": "Top-k parameter used for generation."} )
_A = field(default=0.95 , metadata={"help": "Top-p parameter used for nucleus sampling."} )
_A = field(default=10 , metadata={"help": "Number of generations to run in parallel."} )
_A = field(
default=200 , metadata={"help": "Number of completions to generate for each sample."} )
_A = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
_A = field(
default="eval_results.json" , metadata={"help": "Random seed used for evaluation."} )
_A = field(
default="0" , metadata={"help": "Allow `code_eval` to execute Python code on machine"} )
_A = field(
default=-1 , metadata={
"help": (
"Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
" number corresponds to which GPU device id to run on."
)
} , )
@dataclass
class __lowercase :
_A = field(
default=__snake_case , metadata={
"help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
} , )
_A = field(
default="transformersbook/codeparrot" , metadata={"help": "Folder or name of dataset to process."} )
_A = field(
default="codeparrot-clean" , metadata={"help": "Folder to save processed processed dataset."} )
_A = field(
default=100000 , metadata={"help": "Number of files to save per JSON output file."} )
_A = field(default="content" , metadata={"help": "Column containing text data to process."} )
_A = field(
default=1000 , metadata={"help": "Maximum line length in file, otherwise file is filtered."} )
_A = field(
default=100 , metadata={"help": "Maximum mean line length in file, otherwise file is filtered."} )
_A = field(
default=0.25 , metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."} )
_A = field(
default=1.5 , metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."} )
_A = field(
default=0.7 , metadata={"help": "Probability for filtering config, test and uncommon files."} )
_A = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} , )
_A = field(
default=__snake_case , metadata={"help": "If True, near-duplicate samples are removed."} )
_A = field(
default=0.85 , metadata={"help": "Jaccard threshold for near-duplicate samples."} )
@dataclass
class __lowercase :
_A = field(
default="gpt2" , metadata={"help": "Base tokenizer to build new tokenizer from."} )
_A = field(
default="transformersbook/codeparrot-train" , metadata={"help": "Dataset to train tokenizer on."} )
_A = field(default="content" , metadata={"help": "Column containing text data to process."} )
_A = field(default=200000 , metadata={"help": "Number of examples to train tokenizer on."} )
_A = field(
default=32768 , metadata={"help": "Vocabulary size of the new tokenizer."} )
_A = field(default="codeparrot" , metadata={"help": "Name of new tokenizer."} )
_A = field(default=__snake_case , metadata={"help": "Push saved tokenizer to the hub."} )
@dataclass
class __lowercase :
_A = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} )
_A = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path to the dataset to pretokenize."} )
_A = field(
default="tokenized-codeparrot-train" , metadata={"help": "Repo name of the pretokenized data."} )
_A = field(default=__snake_case , metadata={"help": "Number of workers used for code evaluation."} )
@dataclass
class __lowercase :
_A = field(
default="gpt2-large" , metadata={"help": "Configuration to use for model initialization."} )
_A = field(
default="codeparrot/codeparrot" , metadata={"help": "Tokenizer attached to model."} )
_A = field(default="codeparrot" , metadata={"help": "Name of the created model."} )
_A = field(default=__snake_case , metadata={"help": "Push saved tokenizer to the hub."} )
| 461
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__magic_name__ : int = {
'''configuration_roberta_prelayernorm''': [
'''ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''RobertaPreLayerNormConfig''',
'''RobertaPreLayerNormOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Optional[Any] = [
'''ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaPreLayerNormForCausalLM''',
'''RobertaPreLayerNormForMaskedLM''',
'''RobertaPreLayerNormForMultipleChoice''',
'''RobertaPreLayerNormForQuestionAnswering''',
'''RobertaPreLayerNormForSequenceClassification''',
'''RobertaPreLayerNormForTokenClassification''',
'''RobertaPreLayerNormModel''',
'''RobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Dict = [
'''TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaPreLayerNormForCausalLM''',
'''TFRobertaPreLayerNormForMaskedLM''',
'''TFRobertaPreLayerNormForMultipleChoice''',
'''TFRobertaPreLayerNormForQuestionAnswering''',
'''TFRobertaPreLayerNormForSequenceClassification''',
'''TFRobertaPreLayerNormForTokenClassification''',
'''TFRobertaPreLayerNormMainLayer''',
'''TFRobertaPreLayerNormModel''',
'''TFRobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Optional[Any] = [
'''FlaxRobertaPreLayerNormForCausalLM''',
'''FlaxRobertaPreLayerNormForMaskedLM''',
'''FlaxRobertaPreLayerNormForMultipleChoice''',
'''FlaxRobertaPreLayerNormForQuestionAnswering''',
'''FlaxRobertaPreLayerNormForSequenceClassification''',
'''FlaxRobertaPreLayerNormForTokenClassification''',
'''FlaxRobertaPreLayerNormModel''',
'''FlaxRobertaPreLayerNormPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
__magic_name__ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 602
|
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def A__ ( A_ ) -> str:
_lowercase = {}
_lowercase = job["started_at"]
_lowercase = job["completed_at"]
_lowercase = date_parser.parse(A_ )
_lowercase = date_parser.parse(A_ )
_lowercase = round((end_datetime - start_datetime).total_seconds() / 60.0 )
_lowercase = start
_lowercase = end
_lowercase = duration_in_min
return job_info
def A__ ( A_ , A_=None ) -> int:
_lowercase = None
if token is not None:
_lowercase = {"Accept": "application/vnd.github+json", "Authorization": F"""Bearer {token}"""}
_lowercase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
_lowercase = requests.get(A_ , headers=A_ ).json()
_lowercase = {}
try:
job_time.update({job["name"]: extract_time_from_single_job(A_ ) for job in result["jobs"]} )
_lowercase = math.ceil((result["total_count"] - 100) / 100 )
for i in range(A_ ):
_lowercase = requests.get(url + F"""&page={i + 2}""" , headers=A_ ).json()
job_time.update({job["name"]: extract_time_from_single_job(A_ ) for job in result["jobs"]} )
return job_time
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
if __name__ == "__main__":
__magic_name__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
__magic_name__ : Optional[Any] = parser.parse_args()
__magic_name__ : Union[str, Any] = get_job_time(args.workflow_run_id)
__magic_name__ : List[Any] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f'''{k}: {v["duration"]}''')
| 602
| 1
|
lowercase_: Optional[int] = '0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 648
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowercase__ :Tuple = TypeVar('T')
class snake_case ( Generic[T] ):
'''simple docstring'''
def __init__( self : Optional[int] , __lowercase : T ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = data
__UpperCAmelCase : Node[T] | None = None
def __str__( self : int ):
'''simple docstring'''
return f'''{self.data}'''
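# The stack below is a LIFO container built on the singly linked Node type
# above; push and pop touch only the top node, so both run in O(1).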
class snake_case ( Generic[T] ):
'''simple docstring'''
def __init__( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Node[T] | None = None
def __iter__( self : int ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.top
while node:
yield node.data
__UpperCAmelCase : Dict = node.next
def __str__( self : Any ):
'''simple docstring'''
return "->".join([str(__lowercase ) for item in self] )
def __len__( self : int ):
'''simple docstring'''
return len(tuple(iter(self ) ) )
def A_ ( self : Tuple ):
'''simple docstring'''
return self.top is None
def A_ ( self : List[str] , __lowercase : T ):
'''simple docstring'''
__UpperCAmelCase : int = Node(__lowercase )
if not self.is_empty():
__UpperCAmelCase : int = self.top
__UpperCAmelCase : Tuple = node
def A_ ( self : List[str] ):
'''simple docstring'''
if self.is_empty():
raise IndexError('''pop from empty stack''' )
assert isinstance(self.top , Node )
__UpperCAmelCase : List[str] = self.top
__UpperCAmelCase : List[str] = self.top.next
return pop_node.data
def A_ ( self : str ):
'''simple docstring'''
if self.is_empty():
raise IndexError('''peek from empty stack''' )
assert self.top is not None
return self.top.data
def A_ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : str = None
if __name__ == "__main__":
from doctest import testmod
testmod()
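    # Illustrative usage sketch (exercises the LinkedStack defined above):
    stack: LinkedStack[int] = LinkedStack()
    stack.push(1)
    stack.push(2)
    stack.push(3)
    assert str(stack) == "3->2->1"
    assert stack.pop() == 3
    assert stack.peek() == 2
    assert len(stack) == 2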
| 522
| 0
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    """simple docstring"""
    from diffusers.utils.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """simple docstring"""
    from diffusers.utils.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
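# pytest discovers the two hooks above by name when this conftest.py sits at the
# repository root: pytest_addoption registers the shared --make-reports flag and
# pytest_terminal_summary is meant to write the report files once the run finishes.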
| 19
|
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
_DESCRIPTION = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
_KWARGS_DESCRIPTION = '''
Calculates how good the predictions are given some references, using the pass@k metric
Args:
    predictions: list of candidates to evaluate. Each candidate should be a list
        of strings with several code candidates to solve the problem.
    references: a list with a test for each prediction. Each test should evaluate the
        correctness of a code candidate.
    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
    timeout: maximum time in seconds allowed for each candidate program to run (Default: 3.0).
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
_WARNING = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
_LICENSE = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
    def _info(self):
        '''simple docstring'''
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        '''simple docstring'''
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)
        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")
        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)
            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1
            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))
        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)
        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}
        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k) -> np.ndarray:
    """Estimates pass@k of each problem and returns them in an array."""
    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))
    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)
    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
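# Worked example (illustrative): with n=5 samples of which c=2 pass, the
# unbiased pass@1 estimate is 1 - prod(1 - 1/[4, 5]) = 1 - (3/4)*(4/5) = 0.4,
# i.e. exactly c/n for k=1:
#     estimate_pass_at_k(np.array([5]), np.array([2]), 1)  # -> array([0.4])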
| 19
| 1
|
demo_graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the two nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(queue)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, """G""", """D""")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, """G""", """D""")) # returns 4
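# Illustrative trace: starting from "G" the queue holds [G], then [G, C], then the
# three extensions [G, C, A], [G, C, F], [G, C, G]; expanding [G, C, A] and then
# [G, C, A, B] reaches the goal, so ['G', 'C', 'A', 'B', 'D'] is returned after
# 4 edges, matching the distance computed by bfs_shortest_path_distance.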
| 671
|
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
lowerCAmelCase : Union[str, Any] = _symbol_database.Default()
lowerCAmelCase : int = _descriptor_pool.Default().AddSerializedFile(
b'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18  \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'''
)
lowerCAmelCase : List[str] = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
lowerCAmelCase : Tuple = None
lowerCAmelCase : List[Any] = b'''H\003'''
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
lowerCAmelCase : Tuple = 45
lowerCAmelCase : Optional[int] = 1581
lowerCAmelCase : Dict = 1517
lowerCAmelCase : Any = 1570
lowerCAmelCase : Any = 1584
lowerCAmelCase : Optional[Any] = 1793
lowerCAmelCase : Optional[Any] = 1795
lowerCAmelCase : List[str] = 1916
lowerCAmelCase : Any = 1864
lowerCAmelCase : Dict = 1905
lowerCAmelCase : Dict = 1919
lowerCAmelCase : Any = 2429
lowerCAmelCase : List[Any] = 2208
lowerCAmelCase : Tuple = 2418
lowerCAmelCase : List[Any] = 2323
lowerCAmelCase : List[str] = 2407
# @@protoc_insertion_point(module_scope)
| 214
| 0
|
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
_lowerCamelCase = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
_lowerCamelCase = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
_lowerCamelCase = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
_lowerCamelCase = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
_lowerCamelCase = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
_lowerCamelCase = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
_lowerCamelCase = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
_lowerCamelCase = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
_lowerCamelCase = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
_lowerCamelCase = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
_lowerCamelCase = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
_lowerCamelCase = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
_lowerCamelCase = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
_lowerCamelCase = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
_lowerCamelCase = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
_lowerCamelCase = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
_lowerCamelCase = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
_lowerCamelCase = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
_lowerCamelCase = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
_lowerCamelCase = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
_lowerCamelCase = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
_lowerCamelCase = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
_lowerCamelCase = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
_lowerCamelCase = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
_lowerCamelCase = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
_lowerCamelCase = ''''''
_lowerCamelCase = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
_lowerCamelCase = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
_lowerCamelCase = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
"""readme_md, expected_dict""" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def lowerCamelCase ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] )-> Union[str, Any]:
"""simple docstring"""
assert ReadMe.from_string(_lowercase , _lowercase ).to_dict() == expected_dict
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def lowerCamelCase ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any] )-> Tuple:
"""simple docstring"""
    with pytest.raises(ValueError , match=re.escape(expected_error.format(path="""root""" ) ) ):
a =ReadMe.from_string(_lowercase , _lowercase )
readme.validate()
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] )-> Any:
"""simple docstring"""
    with pytest.raises(ValueError , match=re.escape(expected_error.format(path="""root""" ) ) ):
ReadMe.from_string(_lowercase , _lowercase )
@pytest.mark.parametrize(
"""readme_md,""" , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] )-> str:
"""simple docstring"""
ReadMe.from_string(_lowercase , _lowercase , suppress_parsing_errors=_lowercase )
@pytest.mark.parametrize(
"""readme_md, expected_dict""" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] )-> List[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
a =Path(_lowercase ) / 'README.md'
with open(_lowercase , """w+""" ) as readme_file:
readme_file.write(_lowercase )
a =ReadMe.from_readme(_lowercase , _lowercase ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def lowerCamelCase ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : str )-> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
a =Path(_lowercase ) / 'README.md'
with open(_lowercase , """w+""" ) as readme_file:
readme_file.write(_lowercase )
a =expected_error.format(path=_lowercase )
        with pytest.raises(ValueError , match=re.escape(_lowercase ) ):
a =ReadMe.from_readme(_lowercase , _lowercase )
readme.validate()
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any )-> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
a =Path(_lowercase ) / 'README.md'
with open(_lowercase , """w+""" ) as readme_file:
readme_file.write(_lowercase )
a =expected_error.format(path=_lowercase )
        with pytest.raises(ValueError , match=re.escape(_lowercase ) ):
ReadMe.from_readme(_lowercase , _lowercase )
@pytest.mark.parametrize(
"""readme_md,""" , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( UpperCAmelCase_ : Any )-> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
a =Path(_lowercase ) / 'README.md'
with open(_lowercase , """w+""" ) as readme_file:
readme_file.write(_lowercase )
ReadMe.from_readme(_lowercase , _lowercase , suppress_parsing_errors=_lowercase )
| 704
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowerCamelCase = {
'''configuration_roberta_prelayernorm''': [
'''ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''RobertaPreLayerNormConfig''',
'''RobertaPreLayerNormOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
'''ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaPreLayerNormForCausalLM''',
'''RobertaPreLayerNormForMaskedLM''',
'''RobertaPreLayerNormForMultipleChoice''',
'''RobertaPreLayerNormForQuestionAnswering''',
'''RobertaPreLayerNormForSequenceClassification''',
'''RobertaPreLayerNormForTokenClassification''',
'''RobertaPreLayerNormModel''',
'''RobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
'''TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaPreLayerNormForCausalLM''',
'''TFRobertaPreLayerNormForMaskedLM''',
'''TFRobertaPreLayerNormForMultipleChoice''',
'''TFRobertaPreLayerNormForQuestionAnswering''',
'''TFRobertaPreLayerNormForSequenceClassification''',
'''TFRobertaPreLayerNormForTokenClassification''',
'''TFRobertaPreLayerNormMainLayer''',
'''TFRobertaPreLayerNormModel''',
'''TFRobertaPreLayerNormPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
'''FlaxRobertaPreLayerNormForCausalLM''',
'''FlaxRobertaPreLayerNormForMaskedLM''',
'''FlaxRobertaPreLayerNormForMultipleChoice''',
'''FlaxRobertaPreLayerNormForQuestionAnswering''',
'''FlaxRobertaPreLayerNormForSequenceClassification''',
'''FlaxRobertaPreLayerNormForTokenClassification''',
'''FlaxRobertaPreLayerNormModel''',
'''FlaxRobertaPreLayerNormPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
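# Note (illustrative): at runtime the module object is swapped for a _LazyModule,
# so `from transformers.models.roberta_prelayernorm import RobertaPreLayerNormModel`
# resolves the name through _import_structure on first access and only then pays
# the cost of importing the backend-specific modeling code.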
| 321
| 0
|
from math import ceil
def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks) != 0:
        raise ValueError(
            'Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'
            ' These attention blocks were specified more than once: ' + str(duplicate_blocks) )
    if len(missing_blocks) != 0:
        raise ValueError(
            'There are attention blocks for this model that are not specified in the device_map. Add these attention '
            'blocks to a device on the device_map: ' + str(missing_blocks) )
    if len(extra_blocks) != 0:
        raise ValueError(
            'The device_map contains more attention blocks than this model has. Remove these from the device_map:'
            + str(extra_blocks) )
def get_device_map(n_layers, devices):
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
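# Illustrative sketch: for n_layers=6 spread over devices [0, 1], ceil(6 / 2) = 3
# consecutive blocks land on each device, i.e. {0: [0, 1, 2], 1: [3, 4, 5]};
# assert_device_map would accept exactly this mapping for a 6-block model.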
| 53
|
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request('GET' , 'https://huggingface.co' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('GET' , 'https://huggingface.co' , timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error():
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('GET' , 'https://huggingface.co' )
def test_offline_with_datasets_offline_mode_enabled():
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
http_head('https://huggingface.co' )
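# The three simulation modes cover distinct failure shapes: CONNECTION_TIMES_OUT
# makes outgoing requests hang until their timeout fires, CONNECTION_FAILS raises
# a ConnectionError immediately, and HF_DATASETS_OFFLINE_SET_TO_1 flips the
# library's offline flag so HTTP helpers refuse to go online at all.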
| 107
| 0
|
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"
# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """simple docstring"""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
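# Illustrative sketch: for the line "if not is_torch_available():" the guard regex
# matches, _re_backend captures "torch", and find_backend returns "torch"; guards
# that test several backends would yield a sorted key such as "flax_and_torch".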
def parse_init ( lowerCAmelCase__ ):
"""simple docstring"""
with open(lowerCAmelCase__ , "r" , encoding="utf-8" , newline="\n" ) as f:
_lowerCAmelCase : Optional[int] = f.readlines()
_lowerCAmelCase : Optional[int] = 0
while line_index < len(lowerCAmelCase__ ) and not lines[line_index].startswith("_import_structure = {" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(lowerCAmelCase__ ):
return None
# First grab the objects without a specific backend in _import_structure
_lowerCAmelCase : List[str] = []
while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
_lowerCAmelCase : Any = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(lowerCAmelCase__ ):
_lowerCAmelCase : Optional[Any] = _re_one_line_import_struct.search(lowerCAmelCase__ ).groups()[0]
            _lowerCAmelCase : str = re.findall(r"\[([^\]]+)\]" , lowerCAmelCase__ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(", " )] )
line_index += 1
continue
_lowerCAmelCase : Any = _re_import_struct_key_value.search(lowerCAmelCase__ )
if single_line_import_search is not None:
_lowerCAmelCase : Optional[int] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(lowerCAmelCase__ ) > 0]
objects.extend(lowerCAmelCase__ )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
line_index += 1
_lowerCAmelCase : List[Any] = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("if TYPE_CHECKING" ):
# If the line is an if not is_backend_available, we grab all objects associated.
_lowerCAmelCase : List[Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_lowerCAmelCase : Optional[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_lowerCAmelCase : Union[str, Any] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
_lowerCAmelCase : List[Any] = lines[line_index]
if _re_import_struct_add_one.search(lowerCAmelCase__ ) is not None:
objects.append(_re_import_struct_add_one.search(lowerCAmelCase__ ).groups()[0] )
elif _re_import_struct_add_many.search(lowerCAmelCase__ ) is not None:
_lowerCAmelCase : List[Any] = _re_import_struct_add_many.search(lowerCAmelCase__ ).groups()[0].split(", " )
_lowerCAmelCase : Dict = [obj[1:-1] for obj in imports if len(lowerCAmelCase__ ) > 0]
objects.extend(lowerCAmelCase__ )
elif _re_between_brackets.search(lowerCAmelCase__ ) is not None:
_lowerCAmelCase : List[Any] = _re_between_brackets.search(lowerCAmelCase__ ).groups()[0].split(", " )
_lowerCAmelCase : Optional[int] = [obj[1:-1] for obj in imports if len(lowerCAmelCase__ ) > 0]
objects.extend(lowerCAmelCase__ )
elif _re_quote_object.search(lowerCAmelCase__ ) is not None:
objects.append(_re_quote_object.search(lowerCAmelCase__ ).groups()[0] )
elif line.startswith(" " * 8 + "\"" ):
objects.append(line[9:-3] )
elif line.startswith(" " * 12 + "\"" ):
objects.append(line[13:-3] )
line_index += 1
_lowerCAmelCase : Optional[Any] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_lowerCAmelCase : Tuple = []
while (
line_index < len(lowerCAmelCase__ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("else" )
):
_lowerCAmelCase : Optional[Any] = lines[line_index]
_lowerCAmelCase : int = _re_import.search(lowerCAmelCase__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
_lowerCAmelCase : List[Any] = {"none": objects}
# Let's continue with backend-specific objects
while line_index < len(lowerCAmelCase__ ):
# If the line is an if is_backend_available, we grab all objects associated.
_lowerCAmelCase : Optional[Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_lowerCAmelCase : Any = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_lowerCAmelCase : Any = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
_lowerCAmelCase : List[Any] = lines[line_index]
_lowerCAmelCase : Optional[int] = _re_import.search(lowerCAmelCase__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 12 ):
objects.append(line[12:-2] )
line_index += 1
_lowerCAmelCase : List[Any] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def analyze_results ( import_dict_objects , type_hint_objects ):
"""simple docstring"""
def find_duplicates(lowerCAmelCase__ ):
return [k for k, v in collections.Counter(lowerCAmelCase__ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
_lowerCAmelCase : List[str] = []
for key in import_dict_objects.keys():
_lowerCAmelCase : List[Any] = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
_lowerCAmelCase : List[Any] = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
_lowerCAmelCase : Optional[Any] = "base imports" if key == "none" else f"""{key} backend"""
errors.append(f"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def check_all_inits ( ):
"""simple docstring"""
_lowerCAmelCase : Any = []
for root, _, files in os.walk(lowerCAmelCase__ ):
if "__init__.py" in files:
_lowerCAmelCase : Optional[int] = os.path.join(lowerCAmelCase__ , "__init__.py" )
_lowerCAmelCase : str = parse_init(lowerCAmelCase__ )
if objects is not None:
_lowerCAmelCase : List[Any] = analyze_results(*lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
_lowerCAmelCase : List[str] = f"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append("\n".join(lowerCAmelCase__ ) )
if len(lowerCAmelCase__ ) > 0:
raise ValueError("\n\n".join(lowerCAmelCase__ ) )
def get_transformers_submodules ( ):
"""simple docstring"""
_lowerCAmelCase : Optional[int] = []
for path, directories, files in os.walk(lowerCAmelCase__ ):
for folder in directories:
# Ignore private modules
if folder.startswith("_" ):
directories.remove(lowerCAmelCase__ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(lowerCAmelCase__ ) / folder).glob("*.py" ) ) ) == 0:
continue
_lowerCAmelCase : Dict = str((Path(lowerCAmelCase__ ) / folder).relative_to(lowerCAmelCase__ ) )
_lowerCAmelCase : Tuple = short_path.replace(os.path.sep , "." )
submodules.append(lowerCAmelCase__ )
for fname in files:
if fname == "__init__.py":
continue
_lowerCAmelCase : Tuple = str((Path(lowerCAmelCase__ ) / fname).relative_to(lowerCAmelCase__ ) )
_lowerCAmelCase : List[Any] = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
if len(submodule.split("." ) ) == 1:
submodules.append(lowerCAmelCase__ )
return submodules
snake_case = [
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
]
def check_submodules ( ):
"""simple docstring"""
_lowerCAmelCase : List[Any] = importlib.util.spec_from_file_location(
"transformers" , os.path.join(lowerCAmelCase__ , "__init__.py" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
_lowerCAmelCase : Tuple = spec.loader.load_module()
_lowerCAmelCase : Optional[int] = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(lowerCAmelCase__ ) > 0:
_lowerCAmelCase : List[str] = "\n".join(f"""- {module}""" for module in module_not_registered )
raise ValueError(
"The following submodules are not properly registered in the main init of Transformers:\n"
f"""{list_of_modules}\n"""
"Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 587
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class __A ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Optional[int] = tempfile.mkdtemp()
_lowerCAmelCase : Union[str, Any] = BlipImageProcessor()
        _lowerCAmelCase : Any = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
_lowerCAmelCase : Optional[int] = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert" )
_lowerCAmelCase : int = InstructBlipProcessor(_snake_case , _snake_case , _snake_case )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self , **_snake_case ):
return AutoProcessor.from_pretrained(self.tmpdirname , **_snake_case ).tokenizer
def SCREAMING_SNAKE_CASE__ ( self , **_snake_case ):
return AutoProcessor.from_pretrained(self.tmpdirname , **_snake_case ).image_processor
def SCREAMING_SNAKE_CASE__ ( self , **_snake_case ):
return AutoProcessor.from_pretrained(self.tmpdirname , **_snake_case ).qformer_tokenizer
def SCREAMING_SNAKE_CASE__ ( self ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self ):
        _lowerCAmelCase : Any = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
_lowerCAmelCase : int = [Image.fromarray(np.moveaxis(_snake_case , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Optional[Any] = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Union[str, Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_lowerCAmelCase : List[str] = self.get_image_processor(do_normalize=_snake_case , padding_value=1.0 )
_lowerCAmelCase : int = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_snake_case , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _snake_case )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _snake_case )
self.assertIsInstance(processor.qformer_tokenizer , _snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Union[str, Any] = self.get_image_processor()
_lowerCAmelCase : str = self.get_tokenizer()
_lowerCAmelCase : str = self.get_qformer_tokenizer()
_lowerCAmelCase : Any = InstructBlipProcessor(
tokenizer=_snake_case , image_processor=_snake_case , qformer_tokenizer=_snake_case )
_lowerCAmelCase : Any = self.prepare_image_inputs()
_lowerCAmelCase : Any = image_processor(_snake_case , return_tensors="np" )
_lowerCAmelCase : List[str] = processor(images=_snake_case , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Optional[int] = self.get_image_processor()
_lowerCAmelCase : Tuple = self.get_tokenizer()
_lowerCAmelCase : Tuple = self.get_qformer_tokenizer()
_lowerCAmelCase : Dict = InstructBlipProcessor(
tokenizer=_snake_case , image_processor=_snake_case , qformer_tokenizer=_snake_case )
_lowerCAmelCase : int = "lower newer"
_lowerCAmelCase : Dict = processor(text=_snake_case )
_lowerCAmelCase : Optional[int] = tokenizer(_snake_case , return_token_type_ids=_snake_case )
_lowerCAmelCase : str = qformer_tokenizer(_snake_case , return_token_type_ids=_snake_case )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["qformer_" + key] )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : str = self.get_image_processor()
_lowerCAmelCase : int = self.get_tokenizer()
_lowerCAmelCase : Dict = self.get_qformer_tokenizer()
_lowerCAmelCase : Any = InstructBlipProcessor(
tokenizer=_snake_case , image_processor=_snake_case , qformer_tokenizer=_snake_case )
_lowerCAmelCase : Optional[Any] = "lower newer"
_lowerCAmelCase : Dict = self.prepare_image_inputs()
_lowerCAmelCase : str = processor(text=_snake_case , images=_snake_case )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
# test if it raises when no input is passed
with pytest.raises(_snake_case ):
processor()
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : Union[str, Any] = self.get_image_processor()
_lowerCAmelCase : List[Any] = self.get_tokenizer()
_lowerCAmelCase : Tuple = self.get_qformer_tokenizer()
_lowerCAmelCase : List[Any] = InstructBlipProcessor(
tokenizer=_snake_case , image_processor=_snake_case , qformer_tokenizer=_snake_case )
_lowerCAmelCase : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : Optional[int] = processor.batch_decode(_snake_case )
_lowerCAmelCase : str = tokenizer.batch_decode(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
def SCREAMING_SNAKE_CASE__ ( self ):
_lowerCAmelCase : List[Any] = self.get_image_processor()
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : Any = self.get_qformer_tokenizer()
_lowerCAmelCase : Optional[int] = InstructBlipProcessor(
tokenizer=_snake_case , image_processor=_snake_case , qformer_tokenizer=_snake_case )
_lowerCAmelCase : str = "lower newer"
_lowerCAmelCase : str = self.prepare_image_inputs()
_lowerCAmelCase : Tuple = processor(text=_snake_case , images=_snake_case )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
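# As the assertions above show, the processor fans a (text, images) pair out to
# its three sub-components and returns a single batched encoding whose keys
# combine the language tokenizer's input_ids/attention_mask, the Q-Former
# tokenizer's qformer_-prefixed copies, and the image processor's pixel_values.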
| 587
| 1
|
class EditDistance:
    '''simple docstring'''
    def __init__(self) -> None:
        '''simple docstring'''
        self.word1 = ''
        self.word2 = ''
        self.dp = []
    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        '''simple docstring'''
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]
    def min_dist_top_down(self, word1: str, word2: str) -> int:
        '''simple docstring'''
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)
    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        '''simple docstring'''
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
if __name__ == "__main__":
    solver = EditDistance()
    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()
    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()
    print()
    print(f'''The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}''')
    print(f'''The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}''')
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
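# Worked example (illustrative): both methods agree that the distance between
# "sunday" and "saturday" is 3, reached by inserting "a" and "t" after the
# leading "s" and substituting "n" -> "r".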
| 323
|
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b: result = a\nelse: result = b"

        input_ids = tokenizer.encode(text)
        truncate_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)

    def test_padding_different_model_input_name(self):
        pass
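# Illustrative note (added; grounded in the assertions above): with the toy vocab
# defined in setUp, tokenizing "lower newer" with add_prefix_space=True yields
# ["\u0120low", "er", "\u0120", "n", "e", "w", "er"], and appending the <unk>
# token maps the sequence to ids [14, 15, 10, 9, 3, 2, 15, 19].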
| 446
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
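# Usage sketch (added for illustration; the checkpoint name is the published
# MGP-STR release and is an assumption of this note, not stated in this file):
#     from transformers import MgpstrProcessor, MgpstrForSceneTextRecognition
#     model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")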
| 339
|
'''simple docstring'''
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """
    Resolves a force of the given magnitude and direction into its x and y components.
    """
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    """
    Checks whether a system of point forces is in rotational static equilibrium by
    summing the moments (2-D cross products of position and force) about the origin.
    """
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )

    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])

    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )

    location = array([[0, 0], [0, 0], [0, 0]])

    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])

    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
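    # Extra worked example (a sketch, not from the original problem set): two
    # equal and opposite forces applied at the same point trivially balance.
    simple_forces = array([polar_force(10.0, 0), polar_force(10.0, 180)])
    simple_location = array([[0, 0], [0, 0]])
    assert in_static_equilibrium(simple_forces, simple_location)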
| 339
| 1
|
"""simple docstring"""
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar("_T")


class QueueByTwoStacks(Generic[_T]):
    """A FIFO queue implemented with two LIFO stacks."""

    def __init__(self, iterable: "Iterable[_T] | None" = None) -> None:
        self._stack1: list = list(iterable or [])
        self._stack2: list = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        # Bind the hot methods once to avoid repeated attribute lookups in the loop.
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())

        if not self._stack2:
            raise IndexError("Queue is empty")
        return self._stack2.pop()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
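    # Quick usage sketch (illustrative, beyond the doctests above):
    queue = QueueByTwoStacks([1, 2, 3])
    queue.put(4)
    assert queue.get() == 1  # FIFO order survives the two-stack shuffling
    assert len(queue) == 3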
| 29
|
def and_gate(input_1: int, input_2: int) -> int:
    """Returns 1 only if both inputs are 1, mirroring a logical AND gate."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
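    # For reference, the four prints above emit 0, 0, 0, 1 - the AND truth table
    # outputs for inputs (1,0), (0,0), (0,1), (1,1).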
| 248
| 0
|
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    """
    Simulates the BB84 quantum key distribution protocol and returns the
    generated key as a string of bits.
    """
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")

    from doctest import testmod

    testmod()
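    # Illustrative note (a sketch): with a fixed seed the whole simulation is
    # deterministic, so repeated runs reproduce the same sifted key, e.g.
    #     assert bb84(16, seed=0) == bb84(16, seed=0)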
| 700
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
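# Usage sketch (added; "EleutherAI/gpt-neo-125m" is the public checkpoint name and
# an assumption of this note, not something this file declares):
#     from transformers import GPTNeoForCausalLM
#     model = GPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-125m")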
| 311
| 0
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})

            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)

            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)

            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")

        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
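# Minimal usage sketch (added; the checkpoint name is illustrative - any
# image-to-text model on the Hub works):
#     from transformers import pipeline
#     captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#     captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")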
| 141
|
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)

    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
    @slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
            "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
            "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
            "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
            "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
            "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
            "conditioning on both left and right context in all layers.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        # fmt: off
        expected_encoding = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
| 553
| 0
|
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
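# These checks are written for pytest; an illustrative invocation (assuming the
# repository layout implied by the paths above) is:
#     python -m pytest digital_image_processing/test_digital_image_processing.py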
| 493
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 493
| 1
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/bart-base""": 1024,
"""facebook/bart-large""": 1024,
"""facebook/bart-large-mnli""": 1024,
"""facebook/bart-large-cnn""": 1024,
"""facebook/bart-large-xsum""": 1024,
"""yjernite/bart_eli5""": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode strings, used so BPE
    can operate on byte-level input without control characters.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """
    Returns the set of adjacent symbol pairs in a word, where the word is a tuple
    of variable-length string symbols.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
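# Usage sketch (added; "facebook/bart-base" is one of the checkpoints listed in the
# pretrained maps above):
#     tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
#     ids = tokenizer("Hello world")["input_ids"]  # BOS/EOS ids are added automatically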
| 104
|
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """
    Builds the quantum Fourier transform circuit on the given number of qubits
    and returns the measurement counts from a 10000-shot simulation.
    """
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
F"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
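    # Interpretation sketch (added): applied to the all-zero input state, the QFT
    # produces a uniform superposition, so the printed counts should be spread
    # roughly evenly over all 2**3 = 8 basis states.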
| 146
| 0
|
"""simple docstring"""
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
lowercase__ = "\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n"
lowercase__ = "\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n"
lowercase__ = "\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for 'record': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'prediction_text': the predicted answer text\n - for 'multirc': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question-answer pair as specified by the dataset\n - 'prediction': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for 'record': list of question-answers dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'answers': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for 'record':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1': F1 score\n - for 'multirc':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1_m': Per-question macro-F1 score\n - 'f1_a': Average F1 score over all answers\n - for 'axb':\n 'matthews_correlation': Matthew Correlation\n - for 'cb':\n - 'accuracy': Accuracy\n - 'f1': F1 score\n - for all others:\n - 'accuracy': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'record')\n >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]\n >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')\n >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def __magic_name__ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[Any] ):
return float((preds == labels).mean() )
def __magic_name__ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any]="binary" ):
__a : Optional[Any] = simple_accuracy(_lowerCamelCase , _lowerCamelCase )
__a : Union[str, Any] = float(fa_score(y_true=_lowerCamelCase , y_pred=_lowerCamelCase , average=_lowerCamelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
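# Illustrative sketch (not part of the original file): the helper above combines
# mean accuracy with an sklearn-style F1 score. Hypothetically, with
# preds = np.array([0, 1, 1, 0]) and labels = np.array([0, 1, 0, 0]) it would
# return {'accuracy': 0.75, 'f1': 0.666...} (precision 0.5, recall 1.0).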
def __magic_name__ ( _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] ):
__a : Dict = {}
for id_pred, label in zip(_lowerCamelCase , _lowerCamelCase ):
__a : Dict = F'''{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}'''
__a : List[Any] = id_pred["""prediction"""]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
__a : Optional[int] = [(pred, label)]
__a , __a : Optional[int] = [], []
for question, preds_labels in question_map.items():
__a , __a : Dict = zip(*_lowerCamelCase )
__a : str = fa_score(y_true=_lowerCamelCase , y_pred=_lowerCamelCase , average="""macro""" )
fas.append(_lowerCamelCase )
__a : str = int(sum(pred == label for pred, label in preds_labels ) == len(_lowerCamelCase ) )
ems.append(_lowerCamelCase )
__a : Dict = float(sum(_lowerCamelCase ) / len(_lowerCamelCase ) )
__a : Union[str, Any] = sum(_lowerCamelCase ) / len(_lowerCamelCase )
__a : List[Any] = float(fa_score(y_true=_lowerCamelCase , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
def lowerCAmelCase__(self ):
'''simple docstring'''
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
def lowerCAmelCase__(self ):
'''simple docstring'''
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
def lowerCAmelCase__(self , _lowercase , _lowercase ):
'''simple docstring'''
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_lowercase , _lowercase )}
elif self.config_name == "cb":
return acc_and_fa(_lowercase , _lowercase , fa_avg="""macro""" )
elif self.config_name == "record":
__a : List[str] = [
{
"""qas""": [
{"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
for ref in references
]
}
]
__a : List[Any] = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
return evaluate_record(_lowercase , _lowercase )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_lowercase , _lowercase )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_lowercase , _lowercase )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 717
|
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowercase__ = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowercase__ = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowercase__ = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowercase__ = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
lowercase__ = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
lowercase__ = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
lowercase__ = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
lowercase__ = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
lowercase__ = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
_lowerCAmelCase = DPRContextEncoderTokenizer
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_lowerCAmelCase = DPRQuestionEncoderTokenizer
lowercase__ = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
lowercase__ = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
lowercase__ = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(__snake_case )
class SCREAMING_SNAKE_CASE__ :
def __call__(self , _lowercase , _lowercase = None , _lowercase = None , _lowercase = False , _lowercase = False , _lowercase = None , _lowercase = None , _lowercase = None , **_lowercase , ):
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
elif titles is None or texts is None:
__a : str = titles if texts is None else texts
return super().__call__(
_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
__a : str = titles if not isinstance(_lowercase , _lowercase ) else [titles]
__a : Optional[Any] = texts if not isinstance(_lowercase , _lowercase ) else [texts]
__a : Tuple = len(_lowercase )
__a : Dict = questions if not isinstance(_lowercase , _lowercase ) else [questions] * n_passages
assert len(_lowercase ) == len(
_lowercase ), F'''There should be as many titles as texts but got {len(_lowercase )} titles and {len(_lowercase )} texts.'''
__a : Optional[Any] = super().__call__(_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase )["""input_ids"""]
__a : str = super().__call__(_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase )["""input_ids"""]
__a : Union[str, Any] = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_lowercase , _lowercase )
]
}
if return_attention_mask is not False:
__a : Optional[int] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
__a : str = attention_mask
return self.pad(_lowercase , padding=_lowercase , max_length=_lowercase , return_tensors=_lowercase )
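# Illustrative sketch (not part of the original file): the call above encodes one
# row per passage in the layout [CLS] <question> [SEP] <title> [SEP] <text>. A
# hypothetical invocation of a reader tokenizer built on this mixin:
# encoded = tokenizer(questions="Who wrote Hamlet?",
#                     titles=["Hamlet", "Macbeth"],
#                     texts=["Hamlet is a tragedy ...", "Macbeth is ..."],
#                     padding=True, return_tensors="pt")
# encoded["input_ids"] then has shape (n_passages=2, sequence_length).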
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase = 16 , _lowercase = 64 , _lowercase = 4 , ):
'''simple docstring'''
__a : Union[str, Any] = reader_input["""input_ids"""]
__a , __a , __a : Optional[int] = reader_output[:3]
__a : int = len(_lowercase )
__a : Any = sorted(range(_lowercase ) , reverse=_lowercase , key=relevance_logits.__getitem__ )
__a : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
__a : Optional[int] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
__a : Dict = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
__a : int = sequence_ids.index(self.pad_token_id )
else:
__a : Optional[Any] = len(_lowercase )
__a : List[Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_lowercase , top_spans=_lowercase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_lowercase , start_index=_lowercase , end_index=_lowercase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_lowercase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , _lowercase , ):
'''simple docstring'''
__a : Tuple = []
for start_index, start_score in enumerate(_lowercase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
__a : str = sorted(_lowercase , key=lambda x : x[1] , reverse=_lowercase )
__a : Union[str, Any] = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F'''Wrong span indices: [{start_index}:{end_index}]'''
__a : List[str] = end_index - start_index + 1
assert length <= max_answer_length, F'''Span is too long: {length} > {max_answer_length}'''
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_lowercase ) == top_spans:
break
return chosen_span_intervals
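# Illustrative note (not part of the original file): every candidate span is
# scored as start_logits[start] + end_logits[end], candidates are sorted by that
# score, and a span is kept only if it does not overlap a previously chosen one,
# so the returned intervals are the `top_spans` highest-scoring non-overlapping
# spans of at most `max_answer_length` tokens.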
@add_end_docstrings(__snake_case )
class SCREAMING_SNAKE_CASE__ ( __snake_case , __snake_case ):
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
_lowerCAmelCase = ["input_ids", "attention_mask"]
_lowerCAmelCase = DPRReaderTokenizer
| 63
| 0
|
"""simple docstring"""
from math import factorial
def __a ( A = 100 ) -> int:
'''simple docstring'''
return sum(int(A ) for x in str(factorial(A ) ) )
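# Illustrative check (not part of the original file): for the default n = 10,
# 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so the function returns 27.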
if __name__ == "__main__":
print(__a(int(input("""Enter the Number: """).strip())))
| 337
|
"""simple docstring"""
def __a ( A ) -> List[str]:
'''simple docstring'''
A__ = [0] * len(A )
A__ = []
A__ = []
A__ = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(A ) ):
if indegree[i] == 0:
queue.append(A )
while queue:
A__ = queue.pop(0 )
cnt += 1
topo.append(A )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(A )
if cnt != len(A ):
print("Cycle exists" )
else:
print(A )
# Adjacency List of Graph
__UpperCAmelCase = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
__a(__UpperCAmelCase)
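# Illustrative note (not part of the original file): for the adjacency list above,
# Kahn's algorithm repeatedly removes zero-indegree vertices and prints
# [0, 1, 2, 3, 4, 5]; a cyclic graph would print "Cycle exists" instead, because
# cnt stays below len(graph).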
| 337
| 1
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class __lowercase :
def __init__( self : Union[str, Any] ,A : str ,A : int=13 ,A : Union[str, Any]=7 ,A : str=False ,A : Union[str, Any]=True ,A : Optional[Any]=False ,A : str=False ,A : List[str]=19 ,A : str=32 ,A : str=5 ,A : str=4 ,A : Optional[int]=37 ,A : Any="gelu" ,A : Optional[int]=0.1 ,A : Optional[Any]=0.1 ,A : Tuple=512 ,A : str=16 ,A : int=2 ,A : Union[str, Any]=0.0_2 ,A : Union[str, Any]=3 ,A : str=4 ,A : List[str]=None ,):
'''simple docstring'''
UpperCAmelCase__ : Any = parent
UpperCAmelCase__ : Tuple = batch_size
UpperCAmelCase__ : Tuple = seq_length
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : Optional[int] = use_input_mask
UpperCAmelCase__ : Any = use_token_type_ids
UpperCAmelCase__ : Tuple = use_labels
UpperCAmelCase__ : Optional[Any] = vocab_size
UpperCAmelCase__ : str = hidden_size
UpperCAmelCase__ : Optional[Any] = num_hidden_layers
UpperCAmelCase__ : Optional[Any] = num_attention_heads
UpperCAmelCase__ : List[Any] = intermediate_size
UpperCAmelCase__ : Tuple = hidden_act
UpperCAmelCase__ : List[str] = hidden_dropout_prob
UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ : List[str] = max_position_embeddings
UpperCAmelCase__ : Any = type_vocab_size
UpperCAmelCase__ : List[Any] = type_sequence_label_size
UpperCAmelCase__ : str = initializer_range
UpperCAmelCase__ : List[Any] = num_labels
UpperCAmelCase__ : List[str] = num_choices
UpperCAmelCase__ : List[Any] = scope
def __lowercase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase__ : Dict = None
if self.use_input_mask:
UpperCAmelCase__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : List[Any] = None
if self.use_labels:
UpperCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase__ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase__ : str = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase__ : Optional[int] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = EsmConfig(
vocab_size=33 ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,is_folding_model=A ,esmfold_config={"""trunk""": {"""num_blocks""": 2}, """fp16_esm""": False} ,)
return config
def __lowercase ( self : Union[str, Any] ,A : Tuple ,A : int ,A : Tuple ,A : Optional[Any] ,A : Optional[Any] ,A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = EsmForProteinFolding(config=A ).float()
model.to(A )
model.eval()
UpperCAmelCase__ : Dict = model(A ,attention_mask=A )
UpperCAmelCase__ : List[Any] = model(A )
UpperCAmelCase__ : int = model(A )
self.parent.assertEqual(result.positions.shape ,(8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape ,(8, self.batch_size, self.seq_length, 7, 2) )
def __lowercase ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = config_and_inputs
UpperCAmelCase__ : Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __lowercase ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
snake_case_ = False
snake_case_ = (EsmForProteinFolding,) if is_torch_available() else ()
snake_case_ = ()
snake_case_ = {} if is_torch_available() else {}
snake_case_ = False
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = EsmFoldModelTester(self )
UpperCAmelCase__ : int = ConfigTester(self ,config_class=A ,hidden_size=37 )
def __lowercase ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
@unittest.skip("""Does not support attention outputs""" )
def __lowercase ( self : str ):
'''simple docstring'''
pass
@unittest.skip
def __lowercase ( self : Any ):
'''simple docstring'''
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def __lowercase ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support passing input embeds!""" )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def __lowercase ( self : str ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def __lowercase ( self : str ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def __lowercase ( self : int ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def __lowercase ( self : Any ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not output hidden states in the normal way.""" )
def __lowercase ( self : str ):
'''simple docstring'''
pass
@unittest.skip("""ESMfold does not output hidden states in the normal way.""" )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold only has one output format.""" )
def __lowercase ( self : str ):
'''simple docstring'''
pass
@unittest.skip("""This test doesn't work for ESMFold and doesn't test core functionality""" )
def __lowercase ( self : int ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support input chunking.""" )
def __lowercase ( self : int ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.""" )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't support data parallel.""" )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __lowercase ( self : int ):
'''simple docstring'''
pass
@require_torch
class __lowercase ( __lowerCamelCase ):
@slow
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Any = EsmForProteinFolding.from_pretrained("""facebook/esmfold_v1""" ).float()
model.eval()
UpperCAmelCase__ : Optional[Any] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
UpperCAmelCase__ : Tuple = model(A )["""positions"""]
UpperCAmelCase__ : Optional[int] = torch.tensor([2.5_8_2_8, 0.7_9_9_3, -1_0.9_3_3_4] ,dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] ,A ,atol=1e-4 ) )
| 194
|
"""simple docstring"""
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCAmelCase = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=8 ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
UpperCAmelCase__ : Any = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
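# Illustrative check (not part of the original file): with the default
# scale_factor=8 the helper rounds a requested size up to a multiple of
# scale_factor**2 = 64 and returns the latent size, e.g. 768 -> 768 // 64 = 12
# -> 12 * 8 = 96, while 500 -> ceil(500 / 64) = 8 -> 8 * 8 = 64.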
class __lowercase ( __lowerCamelCase ):
def __init__( self : str ,A : MultilingualCLIP ,A : XLMRobertaTokenizer ,A : UNetaDConditionModel ,A : Union[DDIMScheduler, DDPMScheduler] ,A : VQModel ,):
'''simple docstring'''
super().__init__()
self.register_modules(
text_encoder=A ,tokenizer=A ,unet=A ,scheduler=A ,movq=A ,)
UpperCAmelCase__ : Optional[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __lowercase ( self : Dict ,A : Any ,A : Tuple ,A : Dict ,A : int ,A : str ,A : List[str] ):
'''simple docstring'''
if latents is None:
UpperCAmelCase__ : Any = randn_tensor(A ,generator=A ,device=A ,dtype=A )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
UpperCAmelCase__ : int = latents.to(A )
UpperCAmelCase__ : Any = latents * scheduler.init_noise_sigma
return latents
def __lowercase ( self : Optional[int] ,A : List[Any] ,A : Optional[Any] ,A : str ,A : Optional[Any] ,A : str=None ,):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = len(A ) if isinstance(A ,A ) else 1
# get prompt text embeddings
UpperCAmelCase__ : List[Any] = self.tokenizer(
A ,padding="""max_length""" ,truncation=A ,max_length=77 ,return_attention_mask=A ,add_special_tokens=A ,return_tensors="""pt""" ,)
UpperCAmelCase__ : List[str] = text_inputs.input_ids
UpperCAmelCase__ : Any = self.tokenizer(A ,padding="""longest""" ,return_tensors="""pt""" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(A ,A ):
UpperCAmelCase__ : List[str] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
UpperCAmelCase__ : str = text_input_ids.to(A )
UpperCAmelCase__ : Optional[Any] = text_inputs.attention_mask.to(A )
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.text_encoder(
input_ids=A ,attention_mask=A )
UpperCAmelCase__ : Optional[int] = prompt_embeds.repeat_interleave(A ,dim=0 )
UpperCAmelCase__ : Optional[int] = text_encoder_hidden_states.repeat_interleave(A ,dim=0 )
UpperCAmelCase__ : List[str] = text_mask.repeat_interleave(A ,dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase__ : List[str]
if negative_prompt is None:
UpperCAmelCase__ : List[Any] = [""""""] * batch_size
elif type(A ) is not type(A ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(A )} !="
f" {type(A )}." )
elif isinstance(A ,A ):
UpperCAmelCase__ : Any = [negative_prompt]
elif batch_size != len(A ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
""" the batch size of `prompt`.""" )
else:
UpperCAmelCase__ : List[Any] = negative_prompt
UpperCAmelCase__ : Any = self.tokenizer(
A ,padding="""max_length""" ,max_length=77 ,truncation=A ,return_attention_mask=A ,add_special_tokens=A ,return_tensors="""pt""" ,)
UpperCAmelCase__ : Optional[int] = uncond_input.input_ids.to(A )
UpperCAmelCase__ : str = uncond_input.attention_mask.to(A )
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.text_encoder(
input_ids=A ,attention_mask=A )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCAmelCase__ : Any = negative_prompt_embeds.shape[1]
UpperCAmelCase__ : Any = negative_prompt_embeds.repeat(1 ,A )
UpperCAmelCase__ : str = negative_prompt_embeds.view(batch_size * num_images_per_prompt ,A )
UpperCAmelCase__ : Dict = uncond_text_encoder_hidden_states.shape[1]
UpperCAmelCase__ : Any = uncond_text_encoder_hidden_states.repeat(1 ,A ,1 )
UpperCAmelCase__ : Any = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt ,A ,-1 )
UpperCAmelCase__ : List[Any] = uncond_text_mask.repeat_interleave(A ,dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase__ : Any = torch.cat([negative_prompt_embeds, prompt_embeds] )
UpperCAmelCase__ : int = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
UpperCAmelCase__ : str = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
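# Illustrative note (not part of the original file): under classifier-free
# guidance the unconditional embeddings are concatenated *before* the
# conditional ones, so a later noise_pred.chunk(2) yields
# (noise_pred_uncond, noise_pred_text) in that order, matching the update
# noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond).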
def __lowercase ( self : Tuple ,A : Dict=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
UpperCAmelCase__ : str = torch.device(f"cuda:{gpu_id}" )
UpperCAmelCase__ : Tuple = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(A ,A )
def __lowercase ( self : int ,A : Optional[Any]=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(""">=""" ,"""0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
UpperCAmelCase__ : List[str] = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("""cpu""" ,silence_dtype_warnings=A )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase__ : List[str] = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = cpu_offload_with_hook(A ,A ,prev_module_hook=A )
if self.safety_checker is not None:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = cpu_offload_with_hook(self.safety_checker ,A ,prev_module_hook=A )
# We'll offload the last model manually.
UpperCAmelCase__ : Any = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __lowercase ( self : List[Any] ):
'''simple docstring'''
if not hasattr(self.unet ,"""_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(A ,"""_hf_hook""" )
and hasattr(module._hf_hook ,"""execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(A )
def __call__( self : Union[str, Any] ,A : Union[str, List[str]] ,A : Union[torch.FloatTensor, List[torch.FloatTensor]] ,A : Union[torch.FloatTensor, List[torch.FloatTensor]] ,A : Optional[Union[str, List[str]]] = None ,A : int = 512 ,A : int = 512 ,A : int = 100 ,A : float = 4.0 ,A : int = 1 ,A : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,A : Optional[torch.FloatTensor] = None ,A : Optional[str] = "pil" ,A : bool = True ,):
'''simple docstring'''
if isinstance(A ,A ):
UpperCAmelCase__ : str = 1
elif isinstance(A ,A ):
UpperCAmelCase__ : Tuple = len(A )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(A )}" )
UpperCAmelCase__ : Optional[int] = self._execution_device
UpperCAmelCase__ : Dict = batch_size * num_images_per_prompt
UpperCAmelCase__ : Union[str, Any] = guidance_scale > 1.0
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self._encode_prompt(
A ,A ,A ,A ,A )
if isinstance(A ,A ):
UpperCAmelCase__ : Dict = torch.cat(A ,dim=0 )
if isinstance(A ,A ):
UpperCAmelCase__ : Dict = torch.cat(A ,dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase__ : Optional[int] = image_embeds.repeat_interleave(A ,dim=0 )
UpperCAmelCase__ : Tuple = negative_image_embeds.repeat_interleave(A ,dim=0 )
UpperCAmelCase__ : List[str] = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(
dtype=prompt_embeds.dtype ,device=A )
self.scheduler.set_timesteps(A ,device=A )
UpperCAmelCase__ : int = self.scheduler.timesteps
UpperCAmelCase__ : Union[str, Any] = self.unet.config.in_channels
UpperCAmelCase__ , UpperCAmelCase__ : Dict = get_new_h_w(A ,A ,self.movq_scale_factor )
# create initial latent
UpperCAmelCase__ : Dict = self.prepare_latents(
(batch_size, num_channels_latents, height, width) ,text_encoder_hidden_states.dtype ,A ,A ,A ,self.scheduler ,)
for i, t in enumerate(self.progress_bar(A ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase__ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase__ : Optional[Any] = {"""text_embeds""": prompt_embeds, """image_embeds""": image_embeds}
UpperCAmelCase__ : Any = self.unet(
sample=A ,timestep=A ,encoder_hidden_states=A ,added_cond_kwargs=A ,return_dict=A ,)[0]
if do_classifier_free_guidance:
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = noise_pred.split(latents.shape[1] ,dim=1 )
UpperCAmelCase__ , UpperCAmelCase__ : Any = noise_pred.chunk(2 )
UpperCAmelCase__ , UpperCAmelCase__ : str = variance_pred.chunk(2 )
UpperCAmelCase__ : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase__ : List[Any] = torch.cat([noise_pred, variance_pred_text] ,dim=1 )
if not (
hasattr(self.scheduler.config ,"""variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = noise_pred.split(latents.shape[1] ,dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase__ : Optional[Any] = self.scheduler.step(
A ,A ,A ,generator=A ,).prev_sample
# post-processing
UpperCAmelCase__ : List[Any] = self.movq.decode(A ,force_not_quantize=A )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
UpperCAmelCase__ : Union[str, Any] = image * 0.5 + 0.5
UpperCAmelCase__ : Optional[Any] = image.clamp(0 ,1 )
UpperCAmelCase__ : Dict = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase__ : List[str] = self.numpy_to_pil(A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A )
| 194
| 1
|
_lowercase = '0.18.2'
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 306
|
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCamelCase__ ( A__ ):
def __init__( self : Tuple , *__a : Tuple , __a : Dict=None , __a : List[str]=None , **__a : Dict ):
'''simple docstring'''
super().__init__(*__a , **__a )
lowerCamelCase__: str = eval_examples
lowerCamelCase__: Optional[int] = post_process_function
def lowerCamelCase_ ( self : str , __a : Optional[Dataset] = None , __a : List[Any]=None , __a : Optional[List[str]] = None , __a : str = "eval" , **__a : Tuple , ):
'''simple docstring'''
lowerCamelCase__: Tuple = gen_kwargs.copy()
lowerCamelCase__: Union[str, Any] = (
gen_kwargs["""max_length"""] if gen_kwargs.get("""max_length""" ) is not None else self.args.generation_max_length
)
lowerCamelCase__: Tuple = (
gen_kwargs["""num_beams"""] if gen_kwargs.get("""num_beams""" ) is not None else self.args.generation_num_beams
)
lowerCamelCase__: Optional[Any] = gen_kwargs
lowerCamelCase__: List[Any] = self.eval_dataset if eval_dataset is None else eval_dataset
lowerCamelCase__: Union[str, Any] = self.get_eval_dataloader(__a )
lowerCamelCase__: Union[str, Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowerCamelCase__: Optional[int] = self.compute_metrics
lowerCamelCase__: Union[str, Any] = None
lowerCamelCase__: Dict = time.time()
lowerCamelCase__: Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCamelCase__: Any = eval_loop(
__a , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__a , metric_key_prefix=__a , )
finally:
lowerCamelCase__: Any = compute_metrics
lowerCamelCase__: int = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
__a , __a , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowerCamelCase__: Tuple = self.post_process_function(__a , __a , __a )
lowerCamelCase__: List[Any] = self.compute_metrics(__a )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
lowerCamelCase__: Dict = metrics.pop(__a )
metrics.update(output.metrics )
else:
lowerCamelCase__: int = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(__a )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowerCamelCase__: List[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , __a )
return metrics
def lowerCamelCase_ ( self : str , __a : List[str] , __a : List[Any] , __a : Tuple=None , __a : str = "test" , **__a : Optional[int] ):
'''simple docstring'''
lowerCamelCase__: List[Any] = gen_kwargs.copy()
lowerCamelCase__: Optional[Any] = self.get_test_dataloader(__a )
# Temporarily disable metric computation, we will do it in the loop here.
lowerCamelCase__: Any = self.compute_metrics
lowerCamelCase__: Optional[int] = None
lowerCamelCase__: int = time.time()
lowerCamelCase__: Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCamelCase__: List[str] = eval_loop(
__a , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__a , metric_key_prefix=__a , )
finally:
lowerCamelCase__: Any = compute_metrics
lowerCamelCase__: Optional[int] = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
__a , __a , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowerCamelCase__: str = self.post_process_function(__a , __a , __a , """predict""" )
lowerCamelCase__: str = self.compute_metrics(__a )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
lowerCamelCase__: Dict = metrics.pop(__a )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__a )
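# Illustrative sketch (not part of the original file): a hypothetical caller
# forwards generation arguments through **gen_kwargs, e.g.
# metrics = trainer.evaluate(max_length=128, num_beams=4)
# preds = trainer.predict(test_dataset, test_examples, max_length=128)
# with max_length/num_beams falling back to args.generation_max_length /
# args.generation_num_beams when omitted.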
| 306
| 1
|
'''simple docstring'''
def a_ ( __UpperCAmelCase ) -> bool:
"""simple docstring"""
snake_case: set[int] =set()
# To detect a back edge, keep track of vertices currently in the recursion stack
snake_case: set[int] =set()
return any(
node not in visited and depth_first_search(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
for node in graph )
def depth_first_search ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> bool:
"""simple docstring"""
visited.add(__UpperCAmelCase )
rec_stk.add(__UpperCAmelCase )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(__UpperCAmelCase )
return False
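# Illustrative check (not part of the original file): the entry function returns
# True for {0: [1], 1: [2], 2: [0]} (the back edge 2 -> 0 is found on the
# recursion stack) and False for the acyclic {0: [1], 1: [2], 2: []}.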
if __name__ == "__main__":
from doctest import testmod
testmod()
| 347
|
'''simple docstring'''
def a_ ( __UpperCAmelCase ) -> int:
"""simple docstring"""
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
snake_case: Any =f'''Input value of [number={number}] must be an integer'''
raise TypeError(__UpperCAmelCase )
if number < 1:
snake_case: Tuple =f'''Input value of [number={number}] must be > 0'''
raise ValueError(__UpperCAmelCase )
snake_case: int =1
for i in range(1 , __UpperCAmelCase ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
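# Illustrative check (not part of the original file): the loop applies the
# Catalan recurrence C(i + 1) = C(i) * (4i - 2) // (i + 1), so inputs 1..5
# yield 1, 1, 2, 5, 14.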
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347
| 1
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_lowercase : Tuple = {"""LayoutLMv2Config""", """LayoutLMv3Config"""}
@is_pipeline_test
class UpperCamelCase__( unittest.TestCase ):
__magic_name__ : str = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__magic_name__ : List[Any] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
__magic_name__ : Tuple = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
__magic_name__ : Optional[Any] = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def a__( self : int )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' )
UpperCAmelCase = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(lowerCAmelCase ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
UpperCAmelCase = text_classifier('''This is great !''' , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}] )
UpperCAmelCase = text_classifier(['''This is great !''', '''This is bad'''] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , [
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
] , )
UpperCAmelCase = text_classifier('''This is great !''' , top_k=1 )
self.assertEqual(nested_simplify(lowerCAmelCase ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
# Legacy behavior
UpperCAmelCase = text_classifier('''This is great !''' , return_all_scores=lowerCAmelCase )
self.assertEqual(nested_simplify(lowerCAmelCase ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
UpperCAmelCase = text_classifier('''This is great !''' , return_all_scores=lowerCAmelCase )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , [[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}]] )
UpperCAmelCase = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=lowerCAmelCase )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , [
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
[{'''label''': '''LABEL_0''', '''score''': 0.504}, {'''label''': '''LABEL_1''', '''score''': 0.496}],
] , )
UpperCAmelCase = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=lowerCAmelCase )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , [
{'''label''': '''LABEL_0''', '''score''': 0.504},
{'''label''': '''LABEL_0''', '''score''': 0.504},
] , )
@require_torch
def a__( self : List[Any] )-> int:
"""simple docstring"""
import torch
UpperCAmelCase = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' , device=torch.device('''cpu''' ) , )
UpperCAmelCase = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(lowerCAmelCase ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
@require_tf
def a__( self : List[str] )-> Dict:
"""simple docstring"""
UpperCAmelCase = pipeline(
task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''tf''' )
UpperCAmelCase = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(lowerCAmelCase ) , [{'''label''': '''LABEL_0''', '''score''': 0.504}] )
@slow
@require_torch
def a__( self : Union[str, Any] )-> int:
"""simple docstring"""
UpperCAmelCase = pipeline('''text-classification''' )
UpperCAmelCase = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(lowerCAmelCase ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] )
UpperCAmelCase = text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(lowerCAmelCase ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
UpperCAmelCase = text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(lowerCAmelCase ) , [{'''label''': '''POSITIVE''', '''score''': 0.988}] )
@slow
@require_tf
def a__( self : List[str] )-> Dict:
"""simple docstring"""
UpperCAmelCase = pipeline('''text-classification''' , framework='''tf''' )
UpperCAmelCase = text_classifier('''This is great !''' )
self.assertEqual(nested_simplify(lowerCAmelCase ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] )
UpperCAmelCase = text_classifier('''This is bad !''' )
self.assertEqual(nested_simplify(lowerCAmelCase ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] )
UpperCAmelCase = text_classifier('''Birds are a type of animal''' )
self.assertEqual(nested_simplify(lowerCAmelCase ) , [{'''label''': '''POSITIVE''', '''score''': 0.988}] )
def a__( self : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : Tuple , lowerCAmelCase : Any )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = TextClassificationPipeline(model=lowerCAmelCase , tokenizer=lowerCAmelCase )
return text_classifier, ["HuggingFace is in", "This is another test"]
def a__( self : Tuple , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
UpperCAmelCase = '''HuggingFace is in'''
UpperCAmelCase = text_classifier(lowerCAmelCase )
self.assertEqual(nested_simplify(lowerCAmelCase ) , [{'''label''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase )}] )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
UpperCAmelCase = ['''HuggingFace is in ''', '''Paris is in France''']
UpperCAmelCase = text_classifier(lowerCAmelCase )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , [{'''label''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase )}, {'''label''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase )}] , )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
self.assertTrue(outputs[1]['''label'''] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
UpperCAmelCase = text_classifier(lowerCAmelCase , top_k=lowerCAmelCase )
UpperCAmelCase = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , [[{'''label''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase )}] * N, [{'''label''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase )}] * N] , )
UpperCAmelCase = {'''text''': '''HuggingFace is in ''', '''text_pair''': '''Paris is in France'''}
UpperCAmelCase = text_classifier(lowerCAmelCase )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , {'''label''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase )} , )
self.assertTrue(outputs['''label'''] in model.config.idalabel.values() )
        # This might be used as a text pair, but the tokenizer + pipeline interaction
        # makes it hard to understand that the pair is not being used properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead, as it was producing wrong outputs.
UpperCAmelCase = [['''HuggingFace is in ''', '''Paris is in France''']]
with self.assertRaises(lowerCAmelCase ):
text_classifier(lowerCAmelCase )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
UpperCAmelCase = text_classifier([[['''HuggingFace is in ''', '''Paris is in France''']]] )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , [{'''label''': ANY(lowerCAmelCase ), '''score''': ANY(lowerCAmelCase )}] , )
self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
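# --- Illustrative usage sketch (not part of the test suite above) ---
# A minimal, hedged example of invoking the text-classification pipeline under
# test. The tiny checkpoint name is taken from the tests; the printed labels
# and scores are placeholders, not guaranteed outputs.
from transformers import pipeline

classifier = pipeline(task="text-classification", model="hf-internal-testing/tiny-random-distilbert")
print(classifier("This is great !"))           # e.g. [{"label": "LABEL_0", "score": ...}]
print(classifier("This is great !", top_k=2))  # all labels, highest score first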
| 210
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase : List[Any] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {
"""xlm-mlm-en-2048""": """https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json""",
"""xlm-mlm-ende-1024""": """https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-enfr-1024""": """https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json""",
"""xlm-mlm-enro-1024""": """https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json""",
"""xlm-mlm-tlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json""",
"""xlm-mlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json""",
"""xlm-clm-enfr-1024""": """https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json""",
"""xlm-clm-ende-1024""": """https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-17-1280""": """https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json""",
"""xlm-mlm-100-1280""": """https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json""",
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Dict = "xlm"
__magic_name__ : str = {
"hidden_size": "emb_dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
"n_words": "vocab_size", # For backward compatibility
}
def __init__( self : List[Any] , lowerCAmelCase : str=30145 , lowerCAmelCase : List[str]=2048 , lowerCAmelCase : Tuple=12 , lowerCAmelCase : str=16 , lowerCAmelCase : Any=0.1 , lowerCAmelCase : Optional[int]=0.1 , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=False , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : str=False , lowerCAmelCase : Dict=1 , lowerCAmelCase : Dict=True , lowerCAmelCase : Optional[Any]=512 , lowerCAmelCase : Optional[Any]=2048**-0.5 , lowerCAmelCase : Tuple=1E-12 , lowerCAmelCase : List[Any]=0.02 , lowerCAmelCase : Dict=0 , lowerCAmelCase : Optional[int]=1 , lowerCAmelCase : Optional[Any]=2 , lowerCAmelCase : Optional[int]=3 , lowerCAmelCase : Dict=5 , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : List[Any]="first" , lowerCAmelCase : Tuple=True , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : List[str]=True , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : int=5 , lowerCAmelCase : Tuple=5 , lowerCAmelCase : Optional[int]=0 , lowerCAmelCase : Any=0 , lowerCAmelCase : int=2 , lowerCAmelCase : List[Any]=0 , **lowerCAmelCase : List[Any] , )-> Any:
"""simple docstring"""
UpperCAmelCase = vocab_size
UpperCAmelCase = emb_dim
UpperCAmelCase = n_layers
UpperCAmelCase = n_heads
UpperCAmelCase = dropout
UpperCAmelCase = attention_dropout
UpperCAmelCase = gelu_activation
UpperCAmelCase = sinusoidal_embeddings
UpperCAmelCase = causal
UpperCAmelCase = asm
UpperCAmelCase = n_langs
UpperCAmelCase = use_lang_emb
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = bos_index
UpperCAmelCase = eos_index
UpperCAmelCase = pad_index
UpperCAmelCase = unk_index
UpperCAmelCase = mask_index
UpperCAmelCase = is_encoder
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = embed_init_std
UpperCAmelCase = init_std
UpperCAmelCase = summary_type
UpperCAmelCase = summary_use_proj
UpperCAmelCase = summary_activation
UpperCAmelCase = summary_proj_to_labels
UpperCAmelCase = summary_first_dropout
UpperCAmelCase = start_n_top
UpperCAmelCase = end_n_top
UpperCAmelCase = mask_token_id
UpperCAmelCase = lang_id
if "n_words" in kwargs:
UpperCAmelCase = kwargs['''n_words''']
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , **lowerCAmelCase )
class UpperCamelCase__( lowerCAmelCase ):
@property
def a__( self : List[str] )-> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCAmelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
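# --- Illustrative sketch (standalone re-implementation; an assumption, not from source) ---
# The `inputs` property above maps each ONNX input name to its dynamic axes.
# This tiny function mirrors that logic without depending on transformers.
from collections import OrderedDict

def onnx_dynamic_inputs(task: str) -> "OrderedDict[str, dict[int, str]]":
    if task == "multiple-choice":
        axes = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        axes = {0: "batch", 1: "sequence"}
    return OrderedDict(
        [("input_ids", axes), ("attention_mask", axes), ("token_type_ids", axes)]
    )

print(onnx_dynamic_inputs("default"))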
| 210
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class SCREAMING_SNAKE_CASE__ :
_lowerCAmelCase = 42
_lowerCAmelCase = None
_lowerCAmelCase = None
def __magic_name__ ( ):
__a : Optional[Any] = Node(1 )
__a : List[str] = Node(2 )
__a : Union[str, Any] = Node(3 )
__a : Tuple = Node(4 )
__a : int = Node(5 )
return tree
def __magic_name__ ( _lowerCamelCase : Node | None ):
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def __magic_name__ ( _lowerCamelCase : Node | None ):
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def __magic_name__ ( _lowerCamelCase : Node | None ):
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def __magic_name__ ( _lowerCamelCase : Node | None ):
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def __magic_name__ ( _lowerCamelCase : Node | None ):
__a : list[Any] = []
if root is None:
return output
__a : Tuple = deque([root] )
while process_queue:
__a : Optional[Any] = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def __magic_name__ ( _lowerCamelCase : Node | None , _lowerCamelCase : int ):
__a : list[Any] = []
def populate_output(_lowerCamelCase : Node | None , _lowerCamelCase : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(_lowerCamelCase , _lowerCamelCase )
return output
def __magic_name__ ( _lowerCamelCase : Node | None , _lowerCamelCase : int ):
__a : list[Any] = []
def populate_output(_lowerCamelCase : Node | None , _lowerCamelCase : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(_lowerCamelCase , _lowerCamelCase )
return output
def __magic_name__ ( _lowerCamelCase : Node | None ):
if root is None:
return []
__a : list[Sequence[Node | None]] = []
__a : List[str] = 0
__a : Optional[Any] = height(_lowerCamelCase )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(_lowerCamelCase , _lowerCamelCase ) )
__a : Any = 1
else:
output.append(get_nodes_from_right_to_left(_lowerCamelCase , _lowerCamelCase ) )
__a : Optional[Any] = 0
return output
def __magic_name__ ( ): # Main function for testing.
__a : Union[str, Any] = make_tree()
print(F'''In-order Traversal: {inorder(_lowerCamelCase )}''' )
print(F'''Pre-order Traversal: {preorder(_lowerCamelCase )}''' )
print(F'''Post-order Traversal: {postorder(_lowerCamelCase )}''' , """\n""" )
print(F'''Height of Tree: {height(_lowerCamelCase )}''' , """\n""" )
print("""Complete Level Order Traversal: """ )
print(level_order(_lowerCamelCase ) , """\n""" )
print("""Level-wise order Traversal: """ )
for level in range(1 , height(_lowerCamelCase ) + 1 ):
print(F'''Level {level}:''' , get_nodes_from_left_to_right(_lowerCamelCase , level=_lowerCamelCase ) )
print("""\nZigZag order Traversal: """ )
print(zigzag(_lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
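# --- Illustrative sketch (self-contained; a de-obfuscated mini version, not from source) ---
# The breadth-first (level-order) traversal above, restated with plain names.
from collections import deque
from dataclasses import dataclass
from typing import Optional

@dataclass
class _Node:
    data: int
    left: "Optional[_Node]" = None
    right: "Optional[_Node]" = None

def _level_order(root: "Optional[_Node]") -> list:
    out: list = []
    queue = deque([root] if root else [])
    while queue:
        node = queue.popleft()
        out.append(node.data)
        if node.left:
            queue.append(node.left)
        if node.right:
            queue.append(node.right)
    return out

print(_level_order(_Node(1, _Node(2), _Node(3))))  # [1, 2, 3]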
| 63
|
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowercase__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
_lowerCAmelCase = "linear"
_lowerCAmelCase = "cosine"
_lowerCAmelCase = "cosine_with_restarts"
_lowerCAmelCase = "polynomial"
_lowerCAmelCase = "constant"
_lowerCAmelCase = "constant_with_warmup"
_lowerCAmelCase = "piecewise_constant"
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int = -1 ):
return LambdaLR(_lowerCamelCase , lambda _lowerCamelCase : 1 , last_epoch=_lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int , _lowerCamelCase : int = -1 ):
def lr_lambda(_lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1.0 , _lowerCamelCase ) )
return 1.0
return LambdaLR(_lowerCamelCase , _lowerCamelCase , last_epoch=_lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : str , _lowerCamelCase : int = -1 ):
__a : Optional[int] = {}
__a : Any = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
__a , __a : int = rule_str.split(""":""" )
__a : Optional[int] = int(_lowerCamelCase )
__a : str = float(_lowerCamelCase )
__a : int = value
__a : Dict = float(rule_list[-1] )
def create_rules_function(_lowerCamelCase : str , _lowerCamelCase : Tuple ):
def rule_func(_lowerCamelCase : int ) -> float:
__a : Optional[Any] = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(_lowerCamelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
__a : Optional[int] = create_rules_function(_lowerCamelCase , _lowerCamelCase )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , last_epoch=_lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any , _lowerCamelCase : List[str] , _lowerCamelCase : str=-1 ):
def lr_lambda(_lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : float = 0.5 , _lowerCamelCase : int = -1 ):
def lr_lambda(_lowerCamelCase : Any ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
__a : Dict = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_lowerCamelCase ) * 2.0 * progress )) )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int = 1 , _lowerCamelCase : int = -1 ):
def lr_lambda(_lowerCamelCase : Optional[int] ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
__a : Dict = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_lowerCamelCase ) * progress) % 1.0) )) )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Any , _lowerCamelCase : Any , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any]=1E-7 , _lowerCamelCase : Optional[int]=1.0 , _lowerCamelCase : Optional[int]=-1 ):
__a : Union[str, Any] = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(F'''lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})''' )
def lr_lambda(_lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
__a : Tuple = lr_init - lr_end
__a : int = num_training_steps - num_warmup_steps
__a : Optional[int] = 1 - (current_step - num_warmup_steps) / decay_steps
__a : List[str] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
lowercase__ = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __magic_name__ ( _lowerCamelCase : Union[str, SchedulerType] , _lowerCamelCase : Optimizer , _lowerCamelCase : Optional[str] = None , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : int = 1 , _lowerCamelCase : float = 1.0 , _lowerCamelCase : int = -1 , ):
__a : int = SchedulerType(_lowerCamelCase )
__a : Optional[int] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_lowerCamelCase , last_epoch=_lowerCamelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_lowerCamelCase , step_rules=_lowerCamelCase , last_epoch=_lowerCamelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F'''{name} requires `num_warmup_steps`, please provide that argument.''' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_lowerCamelCase , num_warmup_steps=_lowerCamelCase , last_epoch=_lowerCamelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F'''{name} requires `num_training_steps`, please provide that argument.''' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , num_cycles=_lowerCamelCase , last_epoch=_lowerCamelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , power=_lowerCamelCase , last_epoch=_lowerCamelCase , )
return schedule_func(
_lowerCamelCase , num_warmup_steps=_lowerCamelCase , num_training_steps=_lowerCamelCase , last_epoch=_lowerCamelCase )
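# --- Illustrative sketch (assumes torch is installed; standalone, not from source) ---
# The linear-warmup schedule above in action: warm up for 3 steps, then decay to 0.
import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import LambdaLR

param = torch.nn.Parameter(torch.zeros(1))
optimizer = SGD([param], lr=1.0)
num_warmup_steps, num_training_steps = 3, 10

def lr_lambda(current_step: int) -> float:
    if current_step < num_warmup_steps:
        return current_step / max(1, num_warmup_steps)
    return max(0.0, (num_training_steps - current_step) / max(1, num_training_steps - num_warmup_steps))

scheduler = LambdaLR(optimizer, lr_lambda)
for _ in range(num_training_steps):
    optimizer.step()
    scheduler.step()
    print(round(optimizer.param_groups[0]["lr"], 3))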
| 63
| 1
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
lowerCAmelCase__ = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
lowerCAmelCase__ = {'facebook/blenderbot-3B': 128}
class snake_case ( __lowercase ):
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = ['''input_ids''', '''attention_mask''']
UpperCAmelCase__ = BlenderbotTokenizer
def __init__(self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_="replace" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="<mask>" , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
super().__init__(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , errors=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
SCREAMING_SNAKE_CASE_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE_ ) != add_prefix_space:
SCREAMING_SNAKE_CASE_ = getattr(SCREAMING_SNAKE_CASE_ , pre_tok_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE_ = add_prefix_space
SCREAMING_SNAKE_CASE_ = pre_tok_class(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = add_prefix_space
SCREAMING_SNAKE_CASE_ = '''post_processor'''
SCREAMING_SNAKE_CASE_ = getattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE_ = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE_ = tuple(state['''sep'''] )
if "cls" in state:
SCREAMING_SNAKE_CASE_ = tuple(state['''cls'''] )
SCREAMING_SNAKE_CASE_ = False
if state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE_ ) != add_prefix_space:
SCREAMING_SNAKE_CASE_ = add_prefix_space
SCREAMING_SNAKE_CASE_ = True
if state.get('''trim_offsets''' , SCREAMING_SNAKE_CASE_ ) != trim_offsets:
SCREAMING_SNAKE_CASE_ = trim_offsets
SCREAMING_SNAKE_CASE_ = True
if changes_to_apply:
SCREAMING_SNAKE_CASE_ = getattr(SCREAMING_SNAKE_CASE_ , state.pop('''type''' ) )
SCREAMING_SNAKE_CASE_ = component_class(**SCREAMING_SNAKE_CASE_ )
setattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def _lowercase (self ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else value
SCREAMING_SNAKE_CASE_ = value
def _lowercase (self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE_ )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase (self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE_ )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def _lowercase (self , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to add a space prefix, as is done within Blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = ''' '''.join(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.encode(SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) > self.model_max_length:
SCREAMING_SNAKE_CASE_ = input_ids[-self.model_max_length :]
logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids
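# --- Illustrative sketch (standalone mirror of the token-type-id logic above; dummy ids are assumptions) ---
# Blenderbot, like RoBERTa, does not use token type ids, so the method always
# returns zeros spanning the full special-token layout.
def _token_type_ids(ids_a: list, ids_b: "list | None" = None, cls_id: int = 0, sep_id: int = 2) -> list:
    cls, sep = [cls_id], [sep_id]
    if ids_b is None:
        return [0] * len(cls + ids_a + sep)
    return [0] * len(cls + ids_a + sep + sep + ids_b + sep)

print(_token_type_ids([5, 6, 7]))       # [0, 0, 0, 0, 0]
print(_token_type_ids([5, 6], [8, 9]))  # [0, 0, 0, 0, 0, 0, 0, 0]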
| 626
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class snake_case ( unittest.TestCase ):
@slow
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TFCamembertModel.from_pretrained('''jplu/tf-camembert-base''' )
SCREAMING_SNAKE_CASE_ = tf.convert_to_tensor(
            [[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.intaa , )  # "J'aime le camembert !" ("I love camembert!")
SCREAMING_SNAKE_CASE_ = model(SCREAMING_SNAKE_CASE_ )['''last_hidden_state''']
SCREAMING_SNAKE_CASE_ = tf.TensorShape((1, 10, 7_68) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE_ = tf.convert_to_tensor(
[[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 626
| 1
|
"""simple docstring"""
from collections import deque
class snake_case :
"""simple docstring"""
def __init__( self : str ,lowerCamelCase__ : Any ,lowerCamelCase__ : str ,lowerCamelCase__ : List[str] ):
UpperCAmelCase__ = process_name # process name
UpperCAmelCase__ = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
UpperCAmelCase__ = arrival_time
UpperCAmelCase__ = burst_time # remaining burst time
UpperCAmelCase__ = 0 # total time of the process wait in ready queue
UpperCAmelCase__ = 0 # time from arrival time to completion time
class snake_case :
"""simple docstring"""
def __init__( self : List[Any] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Any ,lowerCamelCase__ : Dict ,lowerCamelCase__ : List[Any] ,):
        # total number of MLFQ queues
UpperCAmelCase__ = number_of_queues
# time slice of queues that round robin algorithm applied
UpperCAmelCase__ = time_slices
# unfinished process is in this ready_queue
UpperCAmelCase__ = queue
# current time
UpperCAmelCase__ = current_time
# finished process is in this sequence queue
UpperCAmelCase__ = deque()
def __lowerCAmelCase ( self : Optional[int] ):
UpperCAmelCase__ = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def __lowerCAmelCase ( self : Tuple ,lowerCamelCase__ : Tuple ):
UpperCAmelCase__ = []
for i in range(len(_lowercase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : Any ):
UpperCAmelCase__ = []
for i in range(len(_lowercase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def __lowerCAmelCase ( self : int ,lowerCamelCase__ : str ):
UpperCAmelCase__ = []
for i in range(len(_lowercase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def __lowerCAmelCase ( self : Union[str, Any] ,lowerCamelCase__ : Dict ):
return [q.burst_time for q in queue]
def __lowerCAmelCase ( self : Optional[int] ,lowerCamelCase__ : Union[str, Any] ):
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def __lowerCAmelCase ( self : str ,lowerCamelCase__ : List[str] ):
UpperCAmelCase__ = deque() # sequence deque of finished process
while len(_lowercase ) != 0:
UpperCAmelCase__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(_lowercase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
UpperCAmelCase__ = 0
# set the process's turnaround time because it is finished
UpperCAmelCase__ = self.current_time - cp.arrival_time
# set the completion time
UpperCAmelCase__ = self.current_time
# add the process to queue that has finished queue
finished.append(_lowercase )
self.finish_queue.extend(_lowercase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def __lowerCAmelCase ( self : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Dict ):
UpperCAmelCase__ = deque() # sequence deque of terminated process
        # just one cycle; unfinished processes go back to the queue
for _ in range(len(_lowercase ) ):
UpperCAmelCase__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(_lowercase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
UpperCAmelCase__ = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(_lowercase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
UpperCAmelCase__ = 0
# set the finish time
UpperCAmelCase__ = self.current_time
# update the process' turnaround time because it is finished
UpperCAmelCase__ = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(_lowercase )
self.finish_queue.extend(_lowercase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def __lowerCAmelCase ( self : List[str] ):
        # all queues except the last one use the round-robin algorithm
for i in range(self.number_of_queues - 1 ):
UpperCAmelCase__ = self.round_robin(
self.ready_queue ,self.time_slices[i] )
        # the last queue uses the first-come-first-served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
lowerCAmelCase__ : List[str] = Process('P1', 0, 53)
lowerCAmelCase__ : List[str] = Process('P2', 0, 17)
lowerCAmelCase__ : Dict = Process('P3', 0, 68)
lowerCAmelCase__ : Union[str, Any] = Process('P4', 0, 24)
lowerCAmelCase__ : Any = 3
lowerCAmelCase__ : Optional[Any] = [17, 25]
lowerCAmelCase__ : List[str] = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})
lowerCAmelCase__ : Optional[Any] = Process('P1', 0, 53)
lowerCAmelCase__ : Tuple = Process('P2', 0, 17)
lowerCAmelCase__ : Optional[int] = Process('P3', 0, 68)
lowerCAmelCase__ : int = Process('P4', 0, 24)
lowerCAmelCase__ : int = 3
lowerCAmelCase__ : Any = [17, 25]
lowerCAmelCase__ : Any = deque([Pa, Pa, Pa, Pa])
lowerCAmelCase__ : Tuple = MLFQ(number_of_queues, time_slices, queue, 0)
lowerCAmelCase__ : List[str] = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
F"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
F"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
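# --- Illustrative sketch (simplified round robin, independent of the MLFQ class above) ---
# One time-slice pass over (name, remaining_burst) pairs, as the MLFQ's upper
# queues do before handing leftovers down to FCFS.
from collections import deque

def round_robin_pass(jobs: "deque[tuple[str, int]]", time_slice: int):
    finished = []
    for _ in range(len(jobs)):
        name, burst = jobs.popleft()
        if burst > time_slice:
            jobs.append((name, burst - time_slice))  # not done: requeue with remaining burst
        else:
            finished.append(name)                    # done within the slice
    return finished, jobs

done, rest = round_robin_pass(deque([("P1", 53), ("P2", 17)]), 17)
print(done, list(rest))  # ['P2'] [('P1', 36)]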
| 707
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
lowerCAmelCase__ : Optional[int] = False
class snake_case ( unittest.TestCase ):
"""simple docstring"""
pass
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : Tuple ):
UpperCAmelCase__ = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion' )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCAmelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
UpperCAmelCase__ = torch.manual_seed(0 )
UpperCAmelCase__ = pipe(
image=lowerCamelCase__ ,generator=lowerCamelCase__ ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type='numpy' ,).images
UpperCAmelCase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase__ = np.array([0.0_4_4_1, 0.0_4_6_9, 0.0_5_0_7, 0.0_5_7_5, 0.0_6_3_2, 0.0_6_5_0, 0.0_8_6_5, 0.0_9_0_9, 0.0_9_4_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
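# --- Illustrative sketch (numpy only; values are placeholders, not real pipeline outputs) ---
# The regression pattern used above: seed, generate, then compare a small slice
# of the output image against a stored reference within a tolerance.
import numpy as np

image = np.zeros((1, 512, 512, 3))          # stand-in for pipe(...).images
image_slice = image[0, 253:256, 253:256, -1]
expected = np.zeros((3, 3))                  # placeholder reference slice
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice - expected).max() < 1e-2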
| 632
| 0
|
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
__magic_name__ = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
__magic_name__ = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def _UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
UpperCAmelCase__: Tuple = AudioClassificationPipeline(model=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
# test with a raw waveform
UpperCAmelCase__: Union[str, Any] = np.zeros((3_4_0_0_0,) )
UpperCAmelCase__: Tuple = np.zeros((1_4_0_0_0,) )
return audio_classifier, [audioa, audio]
def _UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ ):
UpperCAmelCase__ , UpperCAmelCase__: Optional[Any] = examples
UpperCAmelCase__: List[Any] = audio_classifier(lowerCamelCase__ )
# by default a model is initialized with num_labels=2
self.assertEqual(
lowerCamelCase__ , [
{"score": ANY(lowerCamelCase__ ), "label": ANY(lowerCamelCase__ )},
{"score": ANY(lowerCamelCase__ ), "label": ANY(lowerCamelCase__ )},
] , )
UpperCAmelCase__: Optional[int] = audio_classifier(lowerCamelCase__ , top_k=1 )
self.assertEqual(
lowerCamelCase__ , [
{"score": ANY(lowerCamelCase__ ), "label": ANY(lowerCamelCase__ )},
] , )
self.run_torchaudio(lowerCamelCase__ )
@require_torchaudio
def _UpperCAmelCase ( self , lowerCamelCase__ ):
import datasets
# test with a local file
UpperCAmelCase__: Dict = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
UpperCAmelCase__: Tuple = dataset[0]["audio"]["array"]
UpperCAmelCase__: Dict = audio_classifier(lowerCamelCase__ )
self.assertEqual(
lowerCamelCase__ , [
{"score": ANY(lowerCamelCase__ ), "label": ANY(lowerCamelCase__ )},
{"score": ANY(lowerCamelCase__ ), "label": ANY(lowerCamelCase__ )},
] , )
@require_torch
def _UpperCAmelCase ( self ):
UpperCAmelCase__: List[Any] = "anton-l/wav2vec2-random-tiny-classifier"
UpperCAmelCase__: Optional[Any] = pipeline("audio-classification" , model=lowerCamelCase__ )
UpperCAmelCase__: str = np.ones((8_0_0_0,) )
UpperCAmelCase__: Any = audio_classifier(lowerCamelCase__ , top_k=4 )
UpperCAmelCase__: Optional[int] = [
{"score": 0.0_842, "label": "no"},
{"score": 0.0_838, "label": "up"},
{"score": 0.0_837, "label": "go"},
{"score": 0.0_834, "label": "right"},
]
UpperCAmelCase__: int = [
{"score": 0.0_845, "label": "stop"},
{"score": 0.0_844, "label": "on"},
{"score": 0.0_841, "label": "right"},
{"score": 0.0_834, "label": "left"},
]
self.assertIn(nested_simplify(lowerCamelCase__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
UpperCAmelCase__: Any = {"array": np.ones((8_0_0_0,) ), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
UpperCAmelCase__: Optional[Any] = audio_classifier(lowerCamelCase__ , top_k=4 )
self.assertIn(nested_simplify(lowerCamelCase__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def _UpperCAmelCase ( self ):
import datasets
UpperCAmelCase__: Tuple = "superb/wav2vec2-base-superb-ks"
UpperCAmelCase__: Dict = pipeline("audio-classification" , model=lowerCamelCase__ )
UpperCAmelCase__: Tuple = datasets.load_dataset("anton-l/superb_dummy" , "ks" , split="test" )
UpperCAmelCase__: Dict = np.array(dataset[3]["speech"] , dtype=np.floataa )
UpperCAmelCase__: List[str] = audio_classifier(lowerCamelCase__ , top_k=4 )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=3 ) , [
{"score": 0.981, "label": "go"},
{"score": 0.007, "label": "up"},
{"score": 0.006, "label": "_unknown_"},
{"score": 0.001, "label": "down"},
] , )
@require_tf
@unittest.skip("Audio classification is not implemented for TF" )
def _UpperCAmelCase ( self ):
pass
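# --- Illustrative usage sketch (checkpoint name taken from the tests above) ---
# Feeding a raw numpy waveform to the audio-classification pipeline; the
# returned labels and scores depend on the (random) tiny model and are not fixed.
import numpy as np
from transformers import pipeline

audio_classifier = pipeline("audio-classification", model="anton-l/wav2vec2-random-tiny-classifier")
waveform = np.ones((8000,), dtype=np.float32)  # placeholder audio samples
print(audio_classifier(waveform, top_k=2))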
| 113
|
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_lowerCAmelCase : List[str] =get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
_lowerCAmelCase : List[str] =get_tests_dir("""fixtures/vocab.json""")
_lowerCAmelCase : Dict =get_tests_dir("""fixtures""")
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
__magic_name__ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Union[str, Any] = 0
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Optional[int] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCAmelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase__: Any = WavaVecaConfig()
UpperCAmelCase__: Tuple = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
# save in new folder
model_config.save_pretrained(lowerCamelCase__ )
processor.save_pretrained(lowerCamelCase__ )
UpperCAmelCase__: int = AutoProcessor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCAmelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(lowerCamelCase__ , os.path.join(lowerCamelCase__ , lowerCamelCase__ ) )
copyfile(lowerCamelCase__ , os.path.join(lowerCamelCase__ , "vocab.json" ) )
UpperCAmelCase__: Tuple = AutoProcessor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCAmelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase__: Optional[Any] = WavaVecaFeatureExtractor()
UpperCAmelCase__: List[Any] = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
UpperCAmelCase__: Dict = WavaVecaProcessor(lowerCamelCase__ , lowerCamelCase__ )
# save in new folder
processor.save_pretrained(lowerCamelCase__ )
# drop `processor_class` in tokenizer
with open(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) , "r" ) as f:
UpperCAmelCase__: Optional[int] = json.load(lowerCamelCase__ )
config_dict.pop("processor_class" )
with open(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) , "w" ) as f:
f.write(json.dumps(lowerCamelCase__ ) )
UpperCAmelCase__: Any = AutoProcessor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCAmelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase__: str = WavaVecaFeatureExtractor()
UpperCAmelCase__: Tuple = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
UpperCAmelCase__: List[Any] = WavaVecaProcessor(lowerCamelCase__ , lowerCamelCase__ )
# save in new folder
processor.save_pretrained(lowerCamelCase__ )
# drop `processor_class` in feature extractor
with open(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) , "r" ) as f:
UpperCAmelCase__: str = json.load(lowerCamelCase__ )
config_dict.pop("processor_class" )
with open(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) , "w" ) as f:
f.write(json.dumps(lowerCamelCase__ ) )
UpperCAmelCase__: Tuple = AutoProcessor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCAmelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase__: Union[str, Any] = WavaVecaConfig(processor_class="Wav2Vec2Processor" )
model_config.save_pretrained(lowerCamelCase__ )
# copy relevant files
copyfile(lowerCamelCase__ , os.path.join(lowerCamelCase__ , "vocab.json" ) )
            # create empty sample processor
with open(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) , "w" ) as f:
f.write("{}" )
UpperCAmelCase__: Union[str, Any] = AutoProcessor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
def _UpperCAmelCase ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowerCamelCase__ ):
UpperCAmelCase__: Optional[Any] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase__ ):
UpperCAmelCase__: Optional[int] = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=lowerCamelCase__ )
UpperCAmelCase__: Union[str, Any] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=lowerCamelCase__ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
UpperCAmelCase__: Dict = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
UpperCAmelCase__: Any = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
UpperCAmelCase__: Dict = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=lowerCamelCase__ , use_fast=lowerCamelCase__ )
UpperCAmelCase__: str = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def _UpperCAmelCase ( self ):
try:
AutoConfig.register("custom" , lowerCamelCase__ )
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
AutoTokenizer.register(lowerCamelCase__ , slow_tokenizer_class=lowerCamelCase__ )
AutoProcessor.register(lowerCamelCase__ , lowerCamelCase__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase__ ):
AutoProcessor.register(lowerCamelCase__ , lowerCamelCase__ )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase__: Dict = CustomFeatureExtractor.from_pretrained(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__: Tuple = os.path.join(lowerCamelCase__ , "vocab.txt" )
with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
UpperCAmelCase__: str = CustomTokenizer(lowerCamelCase__ )
UpperCAmelCase__: Optional[int] = CustomProcessor(lowerCamelCase__ , lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(lowerCamelCase__ )
UpperCAmelCase__: Tuple = AutoProcessor.from_pretrained(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _UpperCAmelCase ( self ):
class __UpperCamelCase ( _a ):
'''simple docstring'''
__magic_name__ = False
class __UpperCamelCase ( _a ):
'''simple docstring'''
__magic_name__ = False
class __UpperCamelCase ( _a ):
'''simple docstring'''
__magic_name__ = "AutoFeatureExtractor"
__magic_name__ = "AutoTokenizer"
__magic_name__ = False
try:
AutoConfig.register("custom" , lowerCamelCase__ )
AutoFeatureExtractor.register(lowerCamelCase__ , lowerCamelCase__ )
AutoTokenizer.register(lowerCamelCase__ , slow_tokenizer_class=lowerCamelCase__ )
AutoProcessor.register(lowerCamelCase__ , lowerCamelCase__ )
# If remote code is not set, the default is to use local classes.
UpperCAmelCase__: Any = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
UpperCAmelCase__: Union[str, Any] = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
UpperCAmelCase__: Tuple = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor" , trust_remote_code=lowerCamelCase__ )
self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Union[str, Any] = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" )
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Tuple = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" )
self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" )
@is_staging_test
class __UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
__magic_name__ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def _UpperCAmelCase ( cls ):
UpperCAmelCase__: Dict = TOKEN
HfFolder.save_token(lowerCamelCase__ )
@classmethod
def _UpperCAmelCase ( cls ):
try:
delete_repo(token=cls._token , repo_id="test-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-processor" )
except HTTPError:
pass
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Optional[Any] = WavaVecaProcessor.from_pretrained(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(lowerCamelCase__ , "test-processor" ) , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
UpperCAmelCase__: Union[str, Any] = WavaVecaProcessor.from_pretrained(F"{USER}/test-processor" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(lowerCamelCase__ , getattr(new_processor.feature_extractor , lowerCamelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _UpperCAmelCase ( self ):
UpperCAmelCase__: Dict = WavaVecaProcessor.from_pretrained(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(lowerCamelCase__ , "test-processor-org" ) , push_to_hub=lowerCamelCase__ , use_auth_token=self._token , organization="valid_org" , )
UpperCAmelCase__: Optional[int] = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(lowerCamelCase__ , getattr(new_processor.feature_extractor , lowerCamelCase__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _UpperCAmelCase ( self ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
UpperCAmelCase__: List[str] = CustomFeatureExtractor.from_pretrained(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__: Optional[int] = os.path.join(lowerCamelCase__ , "vocab.txt" )
with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
UpperCAmelCase__: Optional[Any] = CustomTokenizer(lowerCamelCase__ )
UpperCAmelCase__: str = CustomProcessor(lowerCamelCase__ , lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"{USER}/test-dynamic-processor" , token=self._token )
UpperCAmelCase__: int = Repository(lowerCamelCase__ , clone_from=F"{USER}/test-dynamic-processor" , token=self._token )
processor.save_pretrained(lowerCamelCase__ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(lowerCamelCase__ , "tokenizer_config.json" ) ) as f:
UpperCAmelCase__: Tuple = json.load(lowerCamelCase__ )
self.assertDictEqual(
tokenizer_config["auto_map"] , {
"AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(lowerCamelCase__ , "custom_feature_extraction.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(lowerCamelCase__ , "custom_tokenization.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(lowerCamelCase__ , "custom_processing.py" ) ) )
repo.push_to_hub()
UpperCAmelCase__: Union[str, Any] = AutoProcessor.from_pretrained(F"{USER}/test-dynamic-processor" , trust_remote_code=lowerCamelCase__ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
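# --- Illustrative sketch (save/load round trip, mirroring the tests above) ---
# AutoProcessor resolves the concrete processor class from the saved config.
import tempfile
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
with tempfile.TemporaryDirectory() as tmp_dir:
    processor.save_pretrained(tmp_dir)
    reloaded = AutoProcessor.from_pretrained(tmp_dir)
print(type(reloaded).__name__)  # Wav2Vec2Processor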
| 113
| 1
|
def UpperCamelCase ( _a = 1_0_0_0_0_0_0 ) -> int:
'''simple docstring'''
lowercase_ :Tuple = limit + 1
lowercase_ :Dict = [0] * limit
for first_term in range(1 , _a ):
for n in range(_a , _a , _a ):
lowercase_ :Any = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
                frequency[n] += 1  # so z > 0 and a > d; also a < 4d
lowercase_ :Tuple = sum(1 for x in frequency[1:limit] if x == 1_0 )
return count
if __name__ == "__main__":
print(f"{solution() = }")
| 441
|
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class UpperCamelCase :
'''simple docstring'''
def __init__( self , UpperCamelCase_ , ):
lowercase_ :Dict = parent
lowercase_ :Optional[Any] = 13
lowercase_ :Optional[Any] = 7
lowercase_ :List[Any] = 30
lowercase_ :int = self.seq_length + self.mem_len
lowercase_ :Any = 15
lowercase_ :Optional[Any] = True
lowercase_ :List[Any] = True
lowercase_ :Any = 99
lowercase_ :Optional[int] = [10, 50, 80]
lowercase_ :Union[str, Any] = 32
lowercase_ :List[Any] = 32
lowercase_ :Tuple = 4
lowercase_ :Tuple = 8
lowercase_ :List[Any] = 128
lowercase_ :Any = 2
lowercase_ :Tuple = 2
lowercase_ :Dict = None
lowercase_ :Optional[Any] = 1
lowercase_ :Optional[int] = 0
lowercase_ :List[str] = 3
lowercase_ :Optional[int] = self.vocab_size - 1
lowercase_ :List[Any] = 0.01
def UpperCamelCase ( self ):
lowercase_ :str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ :int = None
if self.use_labels:
lowercase_ :str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ :int = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}

        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_case_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_keras_fit(self):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
# fmt: off
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]], dtype=tf.int32)  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
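# Note on the expected output above: with do_sample=False, generate() performs
# greedy decoding, so `expected_output_ids` is simply the prompt followed by the
# deterministic continuation (Transfo-XL carries its `mems` across segments
# internally while generating).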
| 441
| 1
|
'''simple docstring'''
def catalan_numbers(upper_limit: int) -> "list[int]":
    """
    Return the Catalan numbers C(0) through C(upper_limit), computed by
    dynamic programming.
    """
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
            N = int(input().strip())
            if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(f'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
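
# Quick closed-form sanity check (standard identity, not part of the original
# module): C(n) = binom(2n, n) / (n + 1), so the DP table for upper_limit = 5
# should be [1, 1, 2, 5, 14, 42].
#
#   from math import comb
#   assert catalan_numbers(5) == [comb(2 * n, n) // (n + 1) for n in range(6)]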
| 44
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        # `act1.a` must not leak onto the separately-constructed `act2`
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
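# For reference: gelu_python computes the exact form x * 0.5 * (1 + erf(x / sqrt(2))),
# while gelu_new uses the tanh approximation -- which is why test_gelu_versions
# asserts the two are *not* allclose, and why "gelu_10" above is simply the exact
# GELU with its output clipped at 10.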
| 512
| 0
|
"""simple docstring"""
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path

git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    # customize the test name so both params appear in the sub-test name
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # run_asr does not save any results yet, so there is nothing to
        # validate here beyond the run having succeeded
        pass

    def run_and_check(
        self,
        stage,
        model,
        eval_steps=10,
        distributed=True,
        fp16=True,
        quality_checks=True,
    ):
        model_name = models[model]

        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )

        self.do_checks(output_dir)

        return output_dir
    def run_trainer(
        self,
        stage,
        model_name,
        eval_steps=10,
        num_train_epochs=1,
        distributed=True,
        fp16=True,
    ):
        # `after=False` keeps ./xxx around for debugging (an assumption; the dump
        # erased the original flag value)
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir
    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
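
# Shape of the command the helpers above assemble (paths/model illustrative,
# not captured from a real run):
#
#   deepspeed --num_nodes 1 --num_gpus 2 <examples_dir>/research_projects/wav2vec2/run_asr.py \
#       --model_name_or_path patrickvonplaten/wav2vec2_tiny_random ... --fp16 \
#       --deepspeed <test_dir>/ds_config_wav2vec2_zero2.json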
| 702
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_xlm_roberta": [
        "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaConfig",
        "XLMRobertaOnnxConfig",
    ],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
        "XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaForCausalLM",
        "XLMRobertaForMaskedLM",
        "XLMRobertaForMultipleChoice",
        "XLMRobertaForQuestionAnswering",
        "XLMRobertaForSequenceClassification",
        "XLMRobertaForTokenClassification",
        "XLMRobertaModel",
        "XLMRobertaPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
        "TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMRobertaForCausalLM",
        "TFXLMRobertaForMaskedLM",
        "TFXLMRobertaForMultipleChoice",
        "TFXLMRobertaForQuestionAnswering",
        "TFXLMRobertaForSequenceClassification",
        "TFXLMRobertaForTokenClassification",
        "TFXLMRobertaModel",
        "TFXLMRobertaPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
        "FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxXLMRobertaForMaskedLM",
        "FlaxXLMRobertaForCausalLM",
        "FlaxXLMRobertaForMultipleChoice",
        "FlaxXLMRobertaForQuestionAnswering",
        "FlaxXLMRobertaForSequenceClassification",
        "FlaxXLMRobertaForTokenClassification",
        "FlaxXLMRobertaModel",
        "FlaxXLMRobertaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
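
# Behavioral note (not original code): with the _LazyModule registration above,
# `import transformers.models.xlm_roberta` stays cheap; heavy submodules such as
# modeling_xlm_roberta are only imported when a name from _import_structure is
# first accessed, e.g.:
#
#   from transformers import XLMRobertaModel  # triggers the torch submodule import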
| 475
| 0
|
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset: datasets.Dataset, length: int):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length: int, batch_size: int):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length: int, type: str):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length: int, batch_size: int, type: str):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_0}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_0_0}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_0_0_0}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """pandas""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """torch""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """tensorflow""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1_0}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1_0_0_0}),
]
    functions_shuffled = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_0}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_0_0}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_0_0_0}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1_0}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1_0_0_0}),
]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__] = func(dataset, **kwargs)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 581
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}
class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # add one for start-of-sequence token
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
| 581
| 1
|
ERROR_MSG = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """Validate a Spanish DNI: 8 digits plus a checksum letter given by number % 23."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MSG)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG) from ex

    if letter.isdigit():
        raise ValueError(ERROR_MSG)

    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
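
# A quick usage sketch (the ID below is synthetic: 12345678 % 23 == 14, and
# LOOKUP_LETTERS[14] == "Z"):
#
#   assert is_spain_national_id("12345678Z")
#   assert is_spain_national_id("12345678-Z")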
| 333
|
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Probability density of a normal distribution with mean `mu` and std `sigma`."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
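
# Sanity check (the standard normal density at the mean is 1/sqrt(2*pi) ≈ 0.3989):
#
#   >>> round(float(gaussian(0)), 4)
#   0.3989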
| 333
| 1
|
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main() -> None:
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
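
# Invocation sketch (assumes the package's `diffusers-cli` console entry point
# maps to main() above):
#
#   $ diffusers-cli env    # prints environment info via EnvironmentCommand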
| 311
|
from manim import *
class Stage5(Scene):
    # NOTE: the scene/class name and the direction/color constants in this file
    # were erased in extraction; the values below are plausible reconstructions,
    # not necessarily the original choices.
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)
        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)
        model_arr = []
        model_cpu_arr = []

        for i, rect in enumerate(model_base):
            target = fill.copy().set_fill(BLUE, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)

            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr)
        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_rects)
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )

        key_text.move_to([-5, 2.4, 0])

        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )

        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)
        step_1 = MarkupText(
            f"Now watch as an input is passed through the model\nand how the memory is utilized and handled.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1))

        input = Square(0.3)
        input.set_fill(RED, opacity=1.0)  # input color is a reconstruction
        input.set_stroke(width=0.0)
        input.next_to(model_base[0], LEFT, buff=0.5)
        self.play(Write(input))

        input.generate_target()
        input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input))
        self.play(FadeOut(step_1))
        a = Arrow(start=UP, end=DOWN, color=RED, buff=0.5)
        a.next_to(model_arr[0].get_left(), UP, buff=0.2)
        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_2 = MarkupText(
            f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2, run_time=3))

        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}

        self.play(
            Write(a),
            Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[0]))
        a_c = a.copy()
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)

            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)

            grp = AnimationGroup(
                FadeOut(a, run_time=0.5), MoveToTarget(input, run_time=0.5), FadeIn(a_c, run_time=0.5), lag_ratio=0.2
            )
            self.play(grp)

            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])

            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs["run_time"] = 0.7

                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                    Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
                )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i]),
                        MoveToTarget(model_cpu_arr[i + 1]),
                    )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7),
                        MoveToTarget(model_cpu_arr[i + 1], run_time=0.7),
                    )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)

                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                )

                self.play(MoveToTarget(model_cpu_arr[i]))
        a = a_c
        a_c = a_c.copy()

        input.generate_target()
        input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        self.play(
            FadeOut(a),
            FadeOut(a_c, run_time=0.5),
        )

        step_3 = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed.", font_size=24)
        step_3.move_to([2, 2, 0])

        self.play(Write(step_3, run_time=3), MoveToTarget(input))

        self.wait()
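
# To render the scene (Manim Community CLI; the scene class name is the
# reconstructed one used above):
#
#   manim -pql <this_file>.py Stage5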
| 311
| 1
|
"""simple docstring"""
import math
class SelfOrganizingMap:
    def get_winner(self, weights, sample):
        """Compute the winning (closest) weight vector by squared Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(self, weights, sample, j, alpha):
        """Pull the winning vector `j` toward the sample: w += alpha * (x - w)."""
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
# running the main() function
if __name__ == "__main__":
main()
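
# The update rule above is the standard competitive-learning step: with
# alpha = 0.5, w = 0.2 and x = 1, one update gives w + 0.5 * (1 - 0.2) = 0.6,
# i.e. the winning weight moves halfway toward the sample on each update.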
| 65
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9_500,
        num_object_labels=1_600,
        num_attr_labels=400,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2_048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
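
# Minimal usage sketch (default hyperparameters shown above):
#
#   config = LxmertConfig()
#   assert config.num_hidden_layers == {"vision": 5, "cross_encoder": 5, "language": 9}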
| 65
| 1
|
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)
    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
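
# Minimal standalone usage of the scheduler under test (a sketch; the convenience
# step() method dispatches to step_prk()/step_plms() internally):
#
#   import torch
#   from diffusers import PNDMScheduler
#
#   scheduler = PNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 8, 8)
#   for t in scheduler.timesteps:
#       model_output = torch.randn_like(sample)  # stand-in for a denoising model
#       sample = scheduler.step(model_output, t, sample).prev_sample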
| 415
|
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    """
    Count how many words in words.txt have a word value equal to a triangular number.
    """
    script_directory = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_directory, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
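
# Word value example from the Project Euler 42 statement: "SKY" maps to
# 19 + 11 + 25 = 55, and 55 = t(10) is triangular, so "SKY" is counted.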
| 367
| 0
|
"""simple docstring"""
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 721
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'allenai/led-base-16384': 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a reversible mapping from utf-8 bytes to printable unicode strings,
    avoiding the whitespace/control characters the BPE code cannot handle.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word, where the word is
    represented as a tuple of (variable-length string) symbols.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
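The -1 padding convention above matters because 0 already means "local attention" in a global_attention_mask. A minimal sketch of the right-padding branch (hypothetical values, added for illustration):

# Sketch: input_ids already padded to length 5, mask still at its unpadded length.
encoded = {"input_ids": [0, 713, 2, 1, 1], "global_attention_mask": [1, 0, 0]}
difference = len(encoded["input_ids"]) - len(encoded["global_attention_mask"])
encoded["global_attention_mask"] = encoded["global_attention_mask"] + [-1] * difference
print(encoded["global_attention_mask"])  # [1, 0, 0, -1, -1]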
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int):
    """Build and run a single-qubit measurement circuit on the Aer simulator."""
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])

    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort sequence[start..end] (both inclusive) in place with the deliberately inefficient slowsort algorithm."""
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
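A short usage sketch (added for illustration; slowsort mutates the list in place):

data = [5, 3, 8, 1, 9, 2]
slowsort(data)
print(data)  # [1, 2, 3, 5, 8, 9]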
"""simple docstring"""
def __UpperCamelCase ( snake_case__ , snake_case__ ):
_validate_point(snake_case__ )
_validate_point(snake_case__ )
if len(snake_case__ ) != len(snake_case__ ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(a - b ) for a, b in zip(snake_case__ , snake_case__ ) ) )
def __UpperCamelCase ( snake_case__ ):
if point:
if isinstance(snake_case__ , snake_case__ ):
for item in point:
if not isinstance(snake_case__ , (int, float) ):
A_ : Optional[int] = (
"""Expected a list of numbers as input, found """
F"""{type(snake_case__ ).__name__}"""
)
raise TypeError(snake_case__ )
else:
A_ : Any = F"""Expected a list of numbers as input, found {type(snake_case__ ).__name__}"""
raise TypeError(snake_case__ )
else:
raise ValueError("""Missing an input""" )
def __UpperCamelCase ( snake_case__ , snake_case__ ):
_validate_point(snake_case__ )
_validate_point(snake_case__ )
if len(snake_case__ ) != len(snake_case__ ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(x - y ) for x, y in zip(snake_case__ , snake_case__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
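For illustration (added), both variants agree on the L1 distance:

print(manhattan_distance([1, 1], [2, 2]))            # 2.0 == |1-2| + |1-2|
print(manhattan_distance_one_liner([1, 1], [2, 2]))  # 2.0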
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    """Classifies an English text against a caller-supplied list of labels via zero-shot NLI."""

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
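A hedged usage sketch (added; assumes the transformers agents runtime, where PipelineTool instances are callable and the default checkpoint is fetched on first use):

classifier = TextClassificationTool()
label = classifier("This is a super nice API!", labels=["positive", "negative"])
print(label)  # expected: the most likely label, e.g. "positive"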
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}

# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
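For orientation (added, not in the source), the defaults above describe a small model; constructing the config makes the dimensions concrete:

config = WhisperConfig()
print(config.d_model, config.encoder_layers, config.max_source_positions)  # 256 6 1500
print(config.begin_suppress_tokens)  # [220, 50256]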
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
def __init__( self ,__UpperCamelCase ,__UpperCamelCase = 13 ,__UpperCamelCase = 64 ,__UpperCamelCase = 2 ,__UpperCamelCase = 3 ,__UpperCamelCase = 3 ,__UpperCamelCase = True ,__UpperCamelCase = True ,__UpperCamelCase = 128 ,__UpperCamelCase=[16, 32, 64, 128] ,__UpperCamelCase = 7 ,__UpperCamelCase = 4 ,__UpperCamelCase = 37 ,__UpperCamelCase = "gelu" ,__UpperCamelCase = 0.1 ,__UpperCamelCase = 0.1 ,__UpperCamelCase = 10 ,__UpperCamelCase = 0.02 ,__UpperCamelCase = 2 ,__UpperCamelCase = 1 ,__UpperCamelCase = 128 ,__UpperCamelCase = [2, 2, 2, 2] ,__UpperCamelCase = 2 ,__UpperCamelCase = 2 ,) -> List[Any]:
'''simple docstring'''
lowercase_ : List[str] = parent
lowercase_ : str = batch_size
lowercase_ : List[Any] = image_size
lowercase_ : Union[str, Any] = patch_size
lowercase_ : Optional[int] = num_channels
lowercase_ : List[Any] = is_training
lowercase_ : Dict = use_labels
lowercase_ : Tuple = hidden_size
lowercase_ : List[Any] = num_hidden_layers
lowercase_ : Any = num_attention_heads
lowercase_ : List[str] = intermediate_size
lowercase_ : Optional[Any] = hidden_act
lowercase_ : Optional[int] = hidden_dropout_prob
lowercase_ : Optional[Any] = attention_probs_dropout_prob
lowercase_ : Tuple = type_sequence_label_size
lowercase_ : List[str] = initializer_range
lowercase_ : Tuple = encoder_stride
lowercase_ : Any = num_attention_outputs
lowercase_ : Dict = embed_dim
lowercase_ : Tuple = embed_dim + 1
lowercase_ : Union[str, Any] = resolution
lowercase_ : str = depths
lowercase_ : Optional[Any] = hidden_sizes
lowercase_ : Dict = dim
lowercase_ : str = mlp_expansion_ratio
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return EfficientFormerConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=__UpperCamelCase ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,resolution=self.resolution ,depths=self.depths ,hidden_sizes=self.hidden_sizes ,dim=self.dim ,mlp_expansion_ratio=self.mlp_expansion_ratio ,)
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
lowercase = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
lowercase = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
    def setUp(self) -> None:
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings' )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : List[str] = model_class(__UpperCamelCase )
lowercase_ : Optional[Any] = model(**self._prepare_for_class(__UpperCamelCase ,__UpperCamelCase ) ,training=__UpperCamelCase )
lowercase_ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase_ : Tuple = getattr(
self.model_tester ,'expected_num_hidden_layers' ,self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__UpperCamelCase ) ,__UpperCamelCase )
if hasattr(self.model_tester ,'encoder_seq_length' ):
lowercase_ : int = self.model_tester.encoder_seq_length
if hasattr(self.model_tester ,'chunk_length' ) and self.model_tester.chunk_length > 1:
lowercase_ : List[str] = seq_length * self.model_tester.chunk_length
else:
lowercase_ : Dict = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) ,[seq_length, self.model_tester.hidden_size] ,)
if config.is_encoder_decoder:
lowercase_ : str = outputs.decoder_hidden_states
                self.assertIsInstance(__UpperCamelCase ,(list, tuple) )
self.assertEqual(len(__UpperCamelCase ) ,__UpperCamelCase )
lowercase_ : Union[str, Any] = getattr(self.model_tester ,'seq_length' ,__UpperCamelCase )
lowercase_ : Optional[Any] = getattr(self.model_tester ,'decoder_seq_length' ,__UpperCamelCase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) ,[decoder_seq_length, self.model_tester.hidden_size] ,)
lowercase_ , lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Dict = True
check_hidden_states_output(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ : Union[str, Any] = True
check_hidden_states_output(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' )
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ , lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : List[str] = True
lowercase_ : List[Any] = getattr(self.model_tester ,'seq_length' ,__UpperCamelCase )
lowercase_ : Optional[int] = getattr(self.model_tester ,'encoder_seq_length' ,__UpperCamelCase )
lowercase_ : List[str] = getattr(self.model_tester ,'key_length' ,__UpperCamelCase )
lowercase_ : List[str] = getattr(self.model_tester ,'chunk_length' ,__UpperCamelCase )
if chunk_length is not None and hasattr(self.model_tester ,'num_hashes' ):
lowercase_ : Union[str, Any] = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
lowercase_ : Union[str, Any] = True
lowercase_ : str = False
lowercase_ : Union[str, Any] = True
lowercase_ : Union[str, Any] = model_class(__UpperCamelCase )
lowercase_ : List[str] = model(**self._prepare_for_class(__UpperCamelCase ,__UpperCamelCase ) ,training=__UpperCamelCase )
lowercase_ : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__UpperCamelCase ) ,self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase_ : Optional[Any] = True
lowercase_ : Optional[int] = model_class(__UpperCamelCase )
lowercase_ : int = model(**self._prepare_for_class(__UpperCamelCase ,__UpperCamelCase ) ,training=__UpperCamelCase )
lowercase_ : List[str] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__UpperCamelCase ) ,self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) ,[self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] ,)
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] ,)
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
lowercase_ : Tuple = model_class(__UpperCamelCase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
lowercase_ : Optional[int] = {
key: tf.keras.Input(shape=val.shape[1:] ,dtype=val.dtype ,name=__UpperCamelCase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
lowercase_ : Optional[Any] = model(__UpperCamelCase )
self.assertTrue(outputs_dict is not None )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class UpperCamelCase ( unittest.TestCase ):
@cached_property
    def default_image_processor(self):
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' )
if is_vision_available()
else None
)
@slow
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
def upper(word: str) -> str:
    """Convert every ASCII lowercase letter in the string to uppercase (a shift of 32 code points)."""
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
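One illustrative call (added): only ASCII lowercase letters are shifted; everything else passes through:

print(upper("hello, World!"))  # HELLO, WORLD!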
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the arc length of fnc between x_start and x_end using `steps` straight-line segments."""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
import requests

APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """Fetch the current weather for a location; locals() supplies q and appid as query params."""
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """Fetch the weather forecast for a location."""
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """Fetch combined weather data for a pair of coordinates."""
    return requests.get(URL_BASE + "onecall", params=locals()).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input("Enter a location:").strip()
        if location:
            pprint(current_weather(location))
        else:
            break
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
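A hedged usage note (added): with the lazy module installed in sys.modules, eager-looking imports only materialize submodules on first attribute access, e.g.:

# Illustrative only; assumes torch is installed so the modeling symbols are registered.
from transformers.models.cpmant import CpmAntConfig  # resolved lazily via _LazyModule
config = CpmAntConfig()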
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        # Name reconstructed; the obfuscated source only showed an empty placeholder method here.
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    bort_4_8_768_1024_hparams = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 1024,
'''hidden_size''': 768,
'''max_length''': 512,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 1024,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1e-5,
'''token_type_vocab_size''': 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.02,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)