Each row below pairs a `code` snippet with a `style_context` snippet; judging from the rows in this section, `label` appears to be 1 exactly when the two snippets share the same codestyle id, and 0 otherwise.

| code (string, 86–54.5k chars) | code_codestyle (int64, 0–371) | style_context (string, 87–49.2k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    """Resolve the direct video source for an Instagram video/IGTV url and return its raw bytes."""
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
| code_codestyle: 107 |
'''Fast BART tokenizer (backed by HuggingFace's *tokenizers* library).'''
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}


class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
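A minimal usage sketch for the tokenizer class above (not part of the original row): it assumes the standard `transformers` entry point and one of the checkpoints from the map above.

```python
# Minimal sketch: load the fast BART tokenizer from the Hub and round-trip a sentence.
# Assumes `pip install transformers`; the checkpoint name is taken from the map above.
from transformers import BartTokenizerFast

tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
enc = tokenizer("Hello world")
print(enc["input_ids"])                    # ids wrapped in <s> ... </s>
print(tokenizer.decode(enc["input_ids"]))  # "<s>Hello world</s>"
```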
| style_context_codestyle: 168 | label: 0 |
'''Lazy-import structure for the GIT model.'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| code_codestyle: 354 |
'''Compute the prime factorization of a positive integer by trial division.'''
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """
    Returns the prime factors of ``n`` in non-decreasing order.

    >>> prime_factors(100)
    [2, 2, 5, 5]
    >>> prime_factors(97)
    [97]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        # whatever remains is itself prime
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| style_context_codestyle: 311 | label: 0 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
_DESCRIPTION = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
| code_codestyle: 47 |
'''Count negative numbers in a grid whose rows and columns are sorted in decreasing order.'''


def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    # Every row and every column must be sorted in decreasing order.
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| style_context_codestyle: 164 | label: 0 |
'''Pix2Struct model configuration.'''
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
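A small sketch (not part of the original row) of how the composite config above is typically assembled from its sub-configs, using the `from_text_vision_configs` classmethod shown in the snippet; the tiny layer counts are arbitrary illustration values.

```python
# Minimal sketch: build a composite Pix2Struct config from sub-configs.
# Assumes the three classes above are importable from transformers.
from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

text_cfg = Pix2StructTextConfig(num_layers=2, num_heads=4)
vision_cfg = Pix2StructVisionConfig(num_hidden_layers=2)
config = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg)
print(config.text_config.num_layers, config.vision_config.num_hidden_layers)  # 2 2
```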
| code_codestyle: 363 |
'''Validate a Spanish national id (DNI/NIF) via its checksum letter.'''
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """
    >>> is_spain_national_id("12345678Z")
    True
    >>> is_spain_national_id("12345678z")  # case-insensitive
    True
    >>> is_spain_national_id("12345678x")
    False
    """
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| style_context_codestyle: 184 | label: 0 |
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n (divisors excluding n itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    # Sum every i below n that is part of an amicable pair:
    # sum_of_divisors(sum_of_divisors(i)) == i while sum_of_divisors(i) != i.
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| code_codestyle: 107 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| style_context_codestyle: 107 | label: 1 |
import unittest

from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # The three mixin tests below do not apply to this rjieba-based tokenizer and
    # were overridden as no-ops in the original file (names reconstructed here).
    def test_training_new_tokenizer(self):
        pass

    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
| code_codestyle: 78 |
import gc
import random
import unittest

import numpy as np
import torch

from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )

        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "A robot, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| style_context_codestyle: 78 | label: 1 |
'''Fast tokenization class for Blenderbot.'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
| code_codestyle: 161 |
'''Primality check via trial division over 6k +/- 1 candidates, with unit tests.'''
import math
import unittest


def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime, else False."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        # negative input violates the "positive int" precondition
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
| style_context_codestyle: 161 | label: 1 |
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class snake_case :
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->str:
raise NotImplementedError()
def UpperCAmelCase__ ( self) ->Optional[int]:
raise NotImplementedError()
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = False , **__UpperCAmelCase) ->int:
a_ = tokenizer
a_ = skip_prompt
a_ = decode_kwargs
# variables used in the streaming process
a_ = []
a_ = 0
a_ = True
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Union[str, Any]:
if len(value.shape) > 1 and value.shape[0] > 1:
raise ValueError("TextStreamer only supports batch size 1")
elif len(value.shape) > 1:
a_ = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
a_ = False
return
# Add the new token to the cache and decodes the entire thing.
self.token_cache.extend(value.tolist())
a_ = self.tokenizer.decode(self.token_cache , **self.decode_kwargs)
# After the symbol for a new line, we flush the cache.
if text.endswith("\n"):
a_ = text[self.print_len :]
a_ = []
a_ = 0
# If the last token is a CJK character, we print the characters.
elif len(__UpperCAmelCase) > 0 and self._is_chinese_char(ord(text[-1])):
a_ = text[self.print_len :]
self.print_len += len(__UpperCAmelCase)
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
a_ = text[self.print_len : text.rfind(" ") + 1]
self.print_len += len(__UpperCAmelCase)
self.on_finalized_text(__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Tuple:
# Flush the cache, if it exists
if len(self.token_cache) > 0:
a_ = self.tokenizer.decode(self.token_cache , **self.decode_kwargs)
a_ = text[self.print_len :]
a_ = []
a_ = 0
else:
a_ = ""
a_ = True
self.on_finalized_text(__UpperCAmelCase , stream_end=__UpperCAmelCase)
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = False) ->Dict:
print(__UpperCAmelCase , flush=__UpperCAmelCase , end="" if not stream_end else None)
def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Optional[int]:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4_E00 and cp <= 0X9_FFF)
or (cp >= 0X3_400 and cp <= 0X4_DBF) #
or (cp >= 0X20_000 and cp <= 0X2A_6DF) #
or (cp >= 0X2A_700 and cp <= 0X2B_73F) #
or (cp >= 0X2B_740 and cp <= 0X2B_81F) #
or (cp >= 0X2B_820 and cp <= 0X2C_EAF) #
or (cp >= 0XF_900 and cp <= 0XF_AFF)
or (cp >= 0X2F_800 and cp <= 0X2F_A1F) #
): #
return True
return False
class snake_case ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = None , **__UpperCAmelCase) ->Optional[int]:
super().__init__(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase)
a_ = Queue()
a_ = None
a_ = timeout
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = False) ->str:
self.text_queue.put(__UpperCAmelCase , timeout=self.timeout)
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout)
def __iter__( self) ->Dict:
return self
def UpperCAmelCase__ ( self) ->Optional[Any]:
a_ = self.text_queue.get(timeout=self.timeout)
if value == self.stop_signal:
raise StopIteration()
else:
return value
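A minimal usage sketch for the iterator streamer above (not part of the original row): `generate()` blocks, so it runs in a worker thread while the main thread consumes the queue. The "gpt2" checkpoint is illustrative; any causal LM works the same way.

```python
# Minimal sketch: stream generated text chunk-by-chunk with TextIteratorStreamer.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer(["A short poem about the sea:"], return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)

# Run generation in the background; the streamer's queue feeds the loop below.
thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=40))
thread.start()
for text_chunk in streamer:
    print(text_chunk, end="", flush=True)
thread.join()
```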
| code_codestyle: 303 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def UpperCamelCase ( UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
if "resnet-50" in model_name:
a_ = ResNetConfig.from_pretrained("microsoft/resnet-50" )
elif "resnet-101" in model_name:
a_ = ResNetConfig.from_pretrained("microsoft/resnet-101" )
else:
raise ValueError("Model name should include either resnet50 or resnet101" )
a_ = DetrConfig(use_timm_backbone=UpperCAmelCase , backbone_config=UpperCAmelCase )
# set label attributes
a_ = "panoptic" in model_name
if is_panoptic:
a_ = 250
else:
a_ = 91
a_ = "huggingface/label-files"
a_ = "coco-detection-id2label.json"
a_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type="dataset" ) , "r" ) )
a_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
a_ = idalabel
a_ = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def UpperCamelCase ( UpperCAmelCase ) ->List[str]:
"""simple docstring"""
a_ = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
F'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
F'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
F'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
F'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
a_ = state_dict.pop(UpperCAmelCase )
a_ = val
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False ) ->Optional[Any]:
"""simple docstring"""
a_ = ""
if is_panoptic:
a_ = "detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
a_ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
a_ = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
a_ = in_proj_weight[:256, :]
a_ = in_proj_bias[:256]
a_ = in_proj_weight[256:512, :]
a_ = in_proj_bias[256:512]
a_ = in_proj_weight[-256:, :]
a_ = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
a_ = in_proj_weight[:256, :]
a_ = in_proj_bias[:256]
a_ = in_proj_weight[256:512, :]
a_ = in_proj_bias[256:512]
a_ = in_proj_weight[-256:, :]
a_ = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
a_ = state_dict.pop(
F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
a_ = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
a_ = in_proj_weight_cross_attn[:256, :]
a_ = in_proj_bias_cross_attn[:256]
a_ = in_proj_weight_cross_attn[256:512, :]
a_ = in_proj_bias_cross_attn[256:512]
a_ = in_proj_weight_cross_attn[-256:, :]
a_ = in_proj_bias_cross_attn[-256:]
def UpperCamelCase ( ) ->Dict:
"""simple docstring"""
a_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
a_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def convert_detr_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original DETR weights into the HuggingFace DETR structure."""
    config, is_panoptic = get_detr_config(model_name)

    # load original model from torch hub
    model_name_to_original_name = {
        "detr-resnet-50": "detr_resnet50",
        "detr-resnet-101": "detr_resnet101",
    }
    logger.info(f"Converting model {model_name}...")
    detr = torch.hub.load("facebookresearch/detr", model_name_to_original_name[model_name], pretrained=True).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config):
        if is_panoptic:
            src = "detr." + src
        rename_key(state_dict, src, dest)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config) if is_panoptic else DetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an image
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    processor = DetrImageProcessor(format=format)

    encoding = processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    original_outputs = detr(pixel_values)
    outputs = model(pixel_values)

    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info("Uploading PyTorch model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        processor.push_to_hub(f"nielsr/{model_name}")
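

# Usage sketch (added, not part of the original script): programmatic invocation
# instead of the CLI below; needs network access to torch hub for the original weights.
# convert_detr_checkpoint("detr-resnet-50", pytorch_dump_folder_path="detr-resnet-50-hf")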
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring'''
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    """Sort by scattering values into one bucket per integer offset from the minimum,
    then sorting each bucket and concatenating the results."""
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for i in my_list:
        buckets[int(i - min_value)].append(i)
    return [v for bucket in buckets for v in sorted(bucket)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
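    # Added illustration: floats land in the bucket given by their integer offset
    # from the minimum, and sorted() orders values that share a bucket.
    assert bucket_sort([0.4, 1.2, 0.2]) == [0.2, 0.4, 1.2]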
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # token ids fit in uint16 when the vocabulary is smaller than 2**16, halving the dump size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    """Output class for the scheduler's step function."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
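

# Quick illustrative check (added; `_demo_beta_schedule` is not part of the diffusers
# source): the squaredcos_cap_v2 schedule yields betas in (0, max_beta] and a strictly
# decreasing cumulative alpha product.
def _demo_beta_schedule(num_timesteps: int = 10) -> None:
    betas = betas_for_alpha_bar(num_timesteps)
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    assert betas.shape == (num_timesteps,)
    assert torch.all(betas > 0) and torch.all(betas <= 0.999)
    assert torch.all(alphas_cumprod[1:] < alphas_cumprod[:-1])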
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    """Modified DDPM scheduler specific to the karlo unCLIP model."""

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """UnCLIP applies no per-timestep input scaling; the sample is returned unchanged."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """Spread `num_inference_steps` evenly over the training schedule, last timestep first."""
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        """Predict the sample at the previous timestep by reversing the SDE."""
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        """Forward-diffuse `original_samples` to the given timesteps."""
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
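

# Worked sketch (added; `_demo_add_noise_broadcast` is not part of the diffusers
# source): the per-sample coefficients gathered in add_noise are unsqueezed until
# they broadcast over image-shaped tensors.
def _demo_add_noise_broadcast() -> None:
    alphas_cumprod = torch.linspace(0.99, 0.01, steps=100)
    original = torch.randn(2, 3, 8, 8)
    noise = torch.randn_like(original)
    timesteps = torch.tensor([10, 90])
    sqrt_ap = alphas_cumprod[timesteps] ** 0.5
    sqrt_omap = (1 - alphas_cumprod[timesteps]) ** 0.5
    while len(sqrt_ap.shape) < len(original.shape):
        sqrt_ap = sqrt_ap.unsqueeze(-1)
        sqrt_omap = sqrt_omap.unsqueeze(-1)
    noisy = sqrt_ap * original + sqrt_omap * noise
    assert noisy.shape == original.shape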
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_nllb_moe': [
'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
'NllbMoeConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
from __future__ import annotations
graph = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
class Graph:
    """Undirected graph supporting breadth-first search and shortest-path reconstruction."""

    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Explore the graph level by level, recording each vertex's BFS-tree parent."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Walk the parent map back to the source and render the path as "a->b->c"."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
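

# Added illustration (`_demo_parent_map` is not part of the original file): BFS from
# "G" reaches "D" through C -> A -> B, so the reconstructed path reads front to back.
def _demo_parent_map() -> None:
    g = Graph(graph, "G")
    g.breath_first_search()
    assert g.shortest_path("D") == "G->C->A->B->D"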
if __name__ == "__main__":
    g = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path("D"))
print(g.shortest_path("G"))
print(g.shortest_path("Foo"))
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """Coerce a single image, a single video, or a batch of videos into a list of videos."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
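

# Added illustration (`_demo_make_batched` is not part of the library file): a single
# image becomes one single-frame video, a list of frames becomes one video, and an
# already-batched list of videos passes through unchanged.
def _demo_make_batched() -> None:
    frame = np.zeros((16, 16, 3), dtype=np.uint8)
    batched = make_batched(frame)
    assert len(batched) == 1 and batched[0][0] is frame  # image -> [[image]]
    batched = make_batched([frame, frame])
    assert len(batched) == 1 and len(batched[0]) == 2  # video -> [video]
    batched = make_batched([[frame], [frame]])
    assert len(batched) == 2  # batch of videos passes through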
class VideoMAEImageProcessor(BaseImageProcessor):
    """Video frame processor: resize, center-crop, rescale and normalize each frame."""

    model_input_names = ["pixel_values"]
def __init__( self : Dict , __UpperCamelCase : bool = True , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCamelCase : bool = True , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : bool = True , __UpperCamelCase : Union[int, float] = 1 / 2_5_5 , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , **__UpperCamelCase : List[Any] , )->None:
super().__init__(**__UpperCamelCase )
_UpperCAmelCase = size if size is not None else {'''shortest_edge''': 2_2_4}
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
_UpperCAmelCase = get_size_dict(__UpperCamelCase , param_name='''crop_size''' )
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = resample
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : Dict , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Optional[int] , )->np.ndarray:
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
if "shortest_edge" in size:
_UpperCAmelCase = get_resize_output_image_size(__UpperCamelCase , size['''shortest_edge'''] , default_to_square=__UpperCamelCase )
elif "height" in size and "width" in size:
_UpperCAmelCase = (size['''height'''], size['''width'''])
else:
raise ValueError(F'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Union[str, Any] , )->np.ndarray:
_UpperCAmelCase = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(__UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : List[str] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Union[int, float] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Optional[Any] , )->int:
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : str , __UpperCamelCase : np.ndarray , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Dict , )->np.ndarray:
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowercase__ ( self : Tuple , __UpperCamelCase : ImageInput , __UpperCamelCase : bool = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : PILImageResampling = None , __UpperCamelCase : bool = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : bool = None , __UpperCamelCase : float = None , __UpperCamelCase : bool = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , )->np.ndarray:
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_UpperCAmelCase = to_numpy_array(__UpperCamelCase )
if do_resize:
_UpperCAmelCase = self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase )
if do_center_crop:
_UpperCAmelCase = self.center_crop(__UpperCamelCase , size=__UpperCamelCase )
if do_rescale:
_UpperCAmelCase = self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase )
if do_normalize:
_UpperCAmelCase = self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase )
_UpperCAmelCase = to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase )
return image
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : ImageInput , __UpperCamelCase : bool = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : PILImageResampling = None , __UpperCamelCase : bool = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : bool = None , __UpperCamelCase : float = None , __UpperCamelCase : bool = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[str, TensorType]] = None , __UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **__UpperCamelCase : Any , )->PIL.Image.Image:
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(__UpperCamelCase , param_name='''crop_size''' )
if not valid_images(__UpperCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
_UpperCAmelCase = make_batched(__UpperCamelCase )
_UpperCAmelCase = [
[
self._preprocess_image(
image=__UpperCamelCase , do_resize=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , do_center_crop=__UpperCamelCase , crop_size=__UpperCamelCase , do_rescale=__UpperCamelCase , rescale_factor=__UpperCamelCase , do_normalize=__UpperCamelCase , image_mean=__UpperCamelCase , image_std=__UpperCamelCase , data_format=__UpperCamelCase , )
for img in video
]
for video in videos
]
_UpperCAmelCase = {'''pixel_values''': videos}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
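

# Standalone sketch (added; `_demo_rescale_normalize` is not part of the library file)
# of the rescale -> normalize arithmetic applied per frame: pixel / 255, then
# (x - mean) / std per channel, with mean = std = 0.5 for IMAGENET_STANDARD.
def _demo_rescale_normalize() -> None:
    img = np.full((2, 2, 3), 255, dtype=np.uint8)
    scaled = img * (1 / 255)
    normed = (scaled - np.array(IMAGENET_STANDARD_MEAN)) / np.array(IMAGENET_STANDARD_STD)
    assert np.allclose(normed, 1.0)  # white pixels map to 1.0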
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Return True if `number` is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq
def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add three fractions x + y + z and return the reduced (numerator, denominator)."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
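

# Worked example (added): 1/2 + 1/3 + 1/6 reduces to 1/1.
assert add_three(1, 2, 1, 3, 1, 6) == (1, 1)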
def solution(order: int = 35) -> int:
    """Sum the unique fraction sums found over all candidate triples up to `order`,
    and return numerator + denominator of the total."""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """
    Unigram tokenizer in the SentencePiece style: NMT/NFKC-normalized, lowercased,
    with Metaspace pre-tokenization, digit splitting and an EOS post-processor.
    """

    def __init__(self, replacement: str = "▁", add_prefix_space: bool = True, unk_token: Union[str, AddedToken] = "<unk>", eos_token: Union[str, AddedToken] = "</s>", pad_token: Union[str, AddedToken] = "<pad>",):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True,):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 8000, show_progress: bool = True,):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        """Patch the serialized model so the unk token id is set on the Unigram model."""
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
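

# Usage sketch (added; `_demo_train_unigram`, the toy corpus and the vocab size are
# illustrative, not part of the original file): train from an in-memory iterator.
def _demo_train_unigram() -> None:
    tokenizer = SentencePieceUnigramTokenizer()
    corpus = ["hello world", "hello tokenizers", "unigram models segment subwords"]
    tokenizer.train_from_iterator(corpus, vocab_size=60, show_progress=False)
    print(tokenizer.encode("hello unigram").tokens)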
'''simple docstring'''
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    """
    Holds a DeepSpeed configuration dictionary and answers quick queries about it,
    e.g. the ZeRO stage and whether parameter/optimizer offload is enabled.
    """

    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )

        self.config = config

        self.set_stage_and_offload()
    def set_stage_and_offload(self):
        # zero stage - resolved as early as possible so the is_zero2/is_zero3 queries work
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        """Return the config value at the dotted key `ds_key_long`, or `default` if absent."""
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        """Delete the sub-tree at the dotted key `ds_key_long` from the config."""
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        """Returns True only if the value at the key is set and truthy, False otherwise."""
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        """Returns True only if the value at the key is set and falsy, False otherwise."""
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload
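

# Usage sketch (added; `_demo_config_lookup` is not part of the accelerate source):
# dotted keys walk nested dicts, and stage 3 plus a cpu/nvme device flips the flags.
def _demo_config_lookup() -> None:
    ds_config = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
    hf_ds_config = HfDeepSpeedConfig(ds_config)
    assert hf_ds_config.get_value("zero_optimization.stage") == 3
    assert hf_ds_config.is_zero3() and hf_ds_config.is_offload()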
class DeepSpeedEngineWrapper:
    """
    Internal wrapper for deepspeed.runtime.engine.DeepSpeedEngine, used to follow a conventional training loop.
    """

    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    """
    Internal wrapper around a deepspeed optimizer.
    """

    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        """Whether or not the optimizer step was skipped because of gradient overflow."""
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    """
    Internal wrapper around a deepspeed scheduler.
    """

    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    """
    Dummy optimizer holding model parameters, used when the optimizer is configured in the deepspeed config file.
    """

    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    """
    Dummy scheduler holding the optimizer, used when the scheduler is configured in the deepspeed config file.
    """

    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit (one loop per set bit)."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by testing the lowest bit and shifting right (one loop per bit)."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
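

# Quick cross-check (added): both counters agree with Python's bin().count("1").
assert all(
    get_set_bits_count_using_brian_kernighans_algorithm(n)
    == get_set_bits_count_using_modulo_operator(n)
    == bin(n).count("1")
    for n in (0, 1, 25, 37, 58, 255)
)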
def benchmark() -> None:
    """Benchmark both bit-count implementations with timeit."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
import sys
from collections import defaultdict
class Heap:
    """Min-heap over vertex distances, tracking each vertex's position in the heap."""

    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the element at `start` down until the min-heap property holds."""
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, temp1 = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp1

            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp)

            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Sift a decreased key at `index` up towards the root."""
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def a__ ( snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = Heap()
__SCREAMING_SNAKE_CASE : int = [0] * len(snake_case )
__SCREAMING_SNAKE_CASE : Dict = [-1] * len(snake_case ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
__SCREAMING_SNAKE_CASE : Dict = [] # Heap of Distance of vertices from their neighboring vertex
__SCREAMING_SNAKE_CASE : Optional[int] = []
for vertex in range(len(snake_case ) ):
distance_tv.append(sys.maxsize )
positions.append(snake_case )
heap.node_position.append(snake_case )
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
__SCREAMING_SNAKE_CASE : str = 1
__SCREAMING_SNAKE_CASE : int = sys.maxsize
for neighbor, distance in adjacency_list[0]:
__SCREAMING_SNAKE_CASE : Optional[Any] = 0
__SCREAMING_SNAKE_CASE : Dict = distance
heap.heapify(snake_case , snake_case )
for _ in range(1 , len(snake_case ) ):
__SCREAMING_SNAKE_CASE : Tuple = heap.delete_minimum(snake_case , snake_case )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
__SCREAMING_SNAKE_CASE : List[Any] = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(snake_case )]
):
__SCREAMING_SNAKE_CASE : int = distance
heap.bottom_to_top(
snake_case , heap.get_position(snake_case ) , snake_case , snake_case )
__SCREAMING_SNAKE_CASE : Any = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
lowercase_ = int(input("""Enter number of edges: """).strip())
lowercase_ = defaultdict(list)
for _ in range(edges_number):
lowercase_ = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
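# A minimal usage sketch (not in the original script): each adjacency_list[u]
# holds [v, weight] pairs, so a triangle 0-1-2 with weights 1, 2, 3 yields the
# two cheapest edges as the spanning tree, reported as (parent, child) pairs.
#
#     example = defaultdict(list)
#     for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
#         example[u].append([v, w])
#         example[v].append([u, w])
#     prisms_algorithm(example)   # -> [(0, 1), (1, 2)]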
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
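# A minimal usage sketch (not part of the original module), assuming the
# surrounding `transformers` package layout; the save path is illustrative:
#
#     config = MvpConfig(encoder_layers=6, decoder_layers=6)
#     config.save_pretrained("./mvp-small")
#     reloaded = MvpConfig.from_pretrained("./mvp-small")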
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict


def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]


def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """
    Copy/paste/tweak model's weights to our Table Transformer structure.
    """
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        idalabel = {0: "table", 1: "table rotated"}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        idalabel = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
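# Example invocation (a sketch; the script filename and output path are
# illustrative, only the checkpoint URL comes from the argument defaults above):
#
#     python convert_table_transformer_checkpoint.py \
#         --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#         --pytorch_dump_folder_path ./table-transformer-detection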
"""simple docstring"""
def euclidean_distance_sqr(point1, point2) -> float:
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0) -> list:
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")) -> float:
    """Brute force: O(n^2) distance between the closest pair of points."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")) -> float:
    """Closest pair among points in the strip around the dividing line."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts) -> float:
    """Divide and conquer: O(n log n) squared distance of the closest pair."""
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # collect points within closest_pair_dis of the vertical dividing line
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts) -> float:
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5
if __name__ == "__main__":
__UpperCamelCase = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print('''Distance:''', closest_pair_of_points(points, len(points)))
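# A quick sanity check (a sketch, not in the original file): the divide-and-
# conquer answer should match the brute force on the same sample points.
#
#     assert closest_pair_of_points(points, len(points)) == (
#         dis_between_closest_pair(points, len(points)) ** 0.5
#     )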
"""simple docstring"""
from __future__ import annotations
def generate_all_permutations(sequence: list) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list, current_sequence: list, index: int, index_used: list
) -> None:
    """
    Creates a state space tree and traverses each branch with DFS; each state
    has exactly len(sequence) - index children, and the recursion terminates
    when it reaches the end of the given sequence.
    """
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
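# The state-space tree explores one position per recursion level, so a
# sequence of length n prints all n! permutations. A tiny illustration
# (a sketch, not in the original file):
#
#     generate_all_permutations([1, 2])   # prints [1, 2] then [2, 1]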
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


# NOTE: the model-specific class name is not recoverable from this fragment;
# a generic name is used here.
class ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
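# A minimal usage sketch (not part of the original module); the input array is
# illustrative:
#
#     import numpy as np
#     processor = ImageProcessor()
#     image = np.zeros((480, 640, 3), dtype=np.uint8)
#     batch = processor.preprocess(image, return_tensors="np")
#     batch["pixel_values"].shape   # (1, 3, 224, 224): resize then center crop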
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with the same config names gives the same results for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")


# NOTE: the model-specific class name is not recoverable from this fragment;
# a generic name is used here.
class VideoImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        data_format=ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # character-level tokenization
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
| 355
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12,
        num_channels=3, image_size=224, patch_size=16, hidden_act="quick_gelu",
        layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None, vocab_size=30522, hidden_size=768, num_hidden_layers=6,
        num_attention_heads=12, intermediate_size=3072, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024,
        initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0,
        position_embedding_type="absolute", use_cache=True, tie_word_embeddings=False,
        bos_token_id=101, eos_token_id=102, num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
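# Added usage sketch (not part of the original module): compose a GIT config with an
# overridden vision tower; the hidden size below is an arbitrary illustrative value.
if __name__ == "__main__":
    demo_config = GitConfig(vision_config={"hidden_size": 512})
    assert demo_config.vision_config.hidden_size == 512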
| 70
| 0
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 248
|
ENERGY_CONVERSION = {
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.6_0_2_1_7_6_6_3_4E-1_9,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.355818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
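# Added usage sketch, values hand-checked against the table above:
# 1 kWh is 3.6 MJ, and 500 J is 0.5 kJ.
if __name__ == "__main__":
    assert energy_conversion("kilowatthour", "megajoule", 1) == 3.6
    assert energy_conversion("joule", "kilojoule", 500) == 0.5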
| 287
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, squeeze_factor=2, max_position_embeddings=512, position_buckets=256,
        share_att_key=True, relative_attention=True, pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm", hidden_act="gelu_python",
        hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1,
        feat_proj_dropout=0.0, final_dropout=0.1, initializer_range=0.02,
        layer_norm_eps=1e-7, feature_layer_norm_eps=1e-5,
        feat_extract_norm="group", feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16,
        apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2,
        mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0,
        ctc_loss_reduction="mean", ctc_zero_infinity=False,
        use_weighted_layer_sum=False, classifier_proj_size=256,
        pad_token_id=0, bos_token_id=1, eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
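# Added sketch: with the default conv_stride above, each output frame of the
# feature extractor covers 5 * 2**6 = 320 input samples.
if __name__ == "__main__":
    assert SEWDConfig().inputs_to_logits_ratio == 320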
| 360
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 247
| 0
|
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267, max_position_embeddings=1024,
        encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16,
        decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0,
        activation_function="gelu", d_model=1024,
        dropout=0.1, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True,
        pad_token_id=1, bos_token_id=0, eos_token_id=2,
        is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2,
        use_prompt=False, prompt_length=100, prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id, **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
| 4
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4,
        decoder_layerdrop=0.0, use_cache=True, activation_function="relu", d_model=256,
        dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02,
        decoder_start_token_id=2, scale_embedding=True,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
| 4
| 1
|
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
| 297
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""gpt2""": 1_0_2_4,
"""gpt2-medium""": 1_0_2_4,
"""gpt2-large""": 1_0_2_4,
"""gpt2-xl""": 1_0_2_4,
"""distilgpt2""": 1_0_2_4,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    """Fast GPT-2 BPE tokenizer, backed by the `tokenizers` library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None, merges_file=None, tokenizer_file=None,
        unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file,
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token,
            add_prefix_space=add_prefix_space, **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 297
| 1
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
| 133
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer, data_dir, max_source_length, max_target_length,
        type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
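# Added worked example for the metrics above (strings are arbitrary):
# normalization strips articles and punctuation, so these count as exact matches.
if __name__ == "__main__":
    assert exact_match_score("The Answer!", "answer")
    assert calculate_exact_match(["the answer"], ["answer"]) == {"em": 1.0}
    print(f1_score("a partial answer", "the full answer"))  # token-overlap F1 -> 0.5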
| 133
| 1
|
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """Solve for inductance, frequency or inductive reactance; exactly one argument must be 0."""
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
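# Added usage sketch: pass 0 for the unknown quantity and read it from the result.
if __name__ == "__main__":
    from math import isclose

    result = ind_reactance(inductance=0, frequency=1000, reactance=100)
    assert isclose(result["inductance"], 100 / (2 * pi * 1000))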
| 354
|
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by inspecting the last bit and shifting right."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """Benchmark the two counting strategies for several inputs."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
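# Added sanity check: both counting strategies agree with a reference popcount.
if __name__ == "__main__":
    for n in (0, 1, 25, 37, 58, 2**20 - 1):
        assert (
            get_set_bits_count_using_brian_kernighans_algorithm(n)
            == get_set_bits_count_using_modulo_operator(n)
            == bin(n).count("1")
        )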
| 7
| 0
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return

    # branch 1: skip the element at `index`
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # branch 2: include the element at `index`
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
| 25
|
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """Vertex of a weighted undirected graph."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a linear scan for the minimum-key vertex."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm using a binary heap for the minimum-key vertex."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
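# Added usage sketch: a 3-vertex triangle with edge weights 1, 2, 3; Prim's
# algorithm keeps the two cheapest edges as the minimum spanning tree.
if __name__ == "__main__":
    demo_graph = [Vertex(n) for n in range(3)]
    connect(demo_graph, 1, 2, 1)
    connect(demo_graph, 2, 3, 2)
    connect(demo_graph, 1, 3, 3)
    assert prim(demo_graph, demo_graph[0]) == [(2, 1), (3, 2)]
    assert list(prim_heap(demo_graph, demo_graph[0])) == [(2, 1), (3, 2)]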
| 70
| 0
|
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 229
|
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
_lowercase = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 229
| 1
|
import random
def _partition(data: list, pivot) -> tuple[list, list, list]:
    """Split ``data`` into elements less than, equal to and greater than ``pivot``."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the ``index``-th smallest element of ``items`` (0-indexed)."""
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
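# Added usage sketch: quick_select returns the k-th smallest element (0-indexed),
# so index 3 of this 7-element list is its median.
if __name__ == "__main__":
    assert quick_select([2, 4, 5, 7, 899, 54, 32], 3) == 7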
| 131
|
"""simple docstring"""
def hexagonal_numbers(length: int) -> list[int]:
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 247
| 0
|
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1_000) -> int:
    """Return the index of the first Fibonacci term with at least ``n`` digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
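# Added check: the first Fibonacci number with three digits is 144, the 12th term.
if __name__ == "__main__":
    assert solution(3) == 12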
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 300
|
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 300
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
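# Added worked example: tree [0, 0, 3]; the three coins at the right leaf take
# three moves to leave every node with exactly one coin.
if __name__ == "__main__":
    demo_tree = TreeNode(0, TreeNode(0), TreeNode(3))
    assert distribute_coins(demo_tree) == 3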
| 297
|
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(
        self,
        num_attention_heads: int = 16, attention_head_dim: int = 88,
        in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0,
        norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False, sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim,
                    in_channels=in_channels, num_layers=num_layers, dropout=dropout,
                    norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias, sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds, activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states, encoder_hidden_states, timestep=None,
        attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
| 297
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : int = logging.get_logger(__name__)
UpperCamelCase : int = {
"""google/switch-base-8""": """https://huggingface.co/google/switch-base-8/blob/main/config.json""",
}
class SwitchTransformersConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us how many encoder layers apart each sparse layer is placed.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us how many decoder layers apart each sparse layer is placed.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
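# A hedged sketch (illustrative, not part of the library file): with the
# defaults above, a sparse (mixture-of-experts) layer replaces the dense FFN
# every `num_layers // num_sparse_encoder_layers` blocks.
if __name__ == "__main__":
    config = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
    print(config.encoder_sparse_step)  # 4 -> one sparse layer per 4 encoder blocks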
| 365
|
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)

        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent."
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
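# A hedged usage sketch (assumes a CLIP tokenizer checkpoint can be fetched
# from the Hub; the repo id and placeholder name are illustrative):
if __name__ == "__main__":
    tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
    tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=2)
    print(tokenizer.replace_placeholder_tokens_in_text("a photo of <cat-toy>"))
    # expected -> "a photo of <cat-toy>_0 <cat-toy>_1"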
| 345
| 0
|
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
'''python''',
'''tqdm''',
'''regex''',
'''requests''',
'''packaging''',
'''filelock''',
'''numpy''',
'''tokenizers''',
'''huggingface-hub''',
'''safetensors''',
'''accelerate''',
'''pyyaml''',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
| 71
|
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    """
    Counts how many words in words.txt have a letter-value sum that is a
    triangular number.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
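# Worked example (illustrative): the word value of "SKY" is
# 19 + 11 + 25 = 55 = t_10 = 10 * 11 / 2, so "SKY" is a triangular word.
if __name__ == "__main__":
    assert sum(ord(x) - 64 for x in "SKY") == 55
    assert 55 in TRIANGULAR_NUMBERS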
| 7
| 0
|
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")
    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
lowercase__ =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
        help='Whether to use relative position embeddings or not. Defaults to False.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowercase__ =parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 367
|
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 90
| 0
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False

    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
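# An alternative sketch (an addition for illustration, not part of the
# original module): Floyd's tortoise-and-hare cycle detection finds a loop in
# O(1) extra space instead of the O(n) `visited` list used above.
def has_loop_floyd(head: Node | None) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node            # advances one step
        fast = fast.next_node.next_node  # advances two steps
        if slow is fast:                 # the pointers meet only inside a cycle
            return True
    return False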
| 229
|
'''simple docstring'''
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    """
    Simulates the BB84 quantum key distribution protocol and returns the key.
    """
    # Set up the random number generator.
    rng = np.random.default_rng(seed=seed)

    # Roughly half of the qubits survive basis sifting,
    # so we prepare more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result
            )
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")

    from doctest import testmod

    testmod()
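# A small illustrative check (an added sketch using numpy only, no quantum
# simulation): with uniformly random bases, about half of the positions
# survive sifting, which is why 6 * key_len qubits comfortably yield a
# key_len-bit key.
if __name__ == "__main__":
    rng = np.random.default_rng(seed=0)
    a = rng.integers(2, size=48)
    b = rng.integers(2, size=48)
    print(int((a == b).sum()))  # roughly 24 matching bases out of 48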
| 229
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that removes the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
| 354
|
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.92_15_18, -10.58_98_14, -6.4_67_13_07],
                    [-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15],
                    [-7.78_12_47, -13.95_15_57, -3.74_05_92],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39],
                    [0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22],
                    [0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 125
| 0
|
MOD_ADLER = 65_521


def adler32(plain_text: str) -> int:
    """
    Implements the Adler-32 hash: iterates over the characters,
    updating the running sums a and b modulo 65521.
    """
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
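# A quick cross-check against the reference implementation in the standard
# library (an added sketch, not part of the original module):
if __name__ == "__main__":
    import zlib

    for sample in ("Wikipedia", "Algorithms"):
        assert adler32(sample) == zlib.adler32(sample.encode())
    print(adler32("Wikipedia"))  # 300286872 == 0x11E60398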
| 300
|
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    """simple docstring"""

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1_000
        self.sample_stride = hop_length * sampling_rate // 1_000

        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.array):
        """
        Extracts MFSC features for one waveform vector (unbatched).
        """
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None):
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
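# A hedged usage sketch (illustrative: a random 16 kHz mono waveform):
if __name__ == "__main__":
    waveform = np.random.randn(16_000).astype(np.float32)  # 1 second of noise
    extractor = MCTCTFeatureExtractor()
    batch = extractor(waveform, sampling_rate=16_000, return_tensors="np")
    print(batch["input_features"][0].shape)  # (num_frames, 80) mel features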
| 300
| 1
|
'''simple docstring'''
from math import factorial
def solution(num: int = 100) -> int:
    """Returns the sum of the digits in the number num!."""
    return sum(map(int, str(factorial(num))))
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
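# Worked example (illustrative): 10! = 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so solution(10) == 27.
if __name__ == "__main__":
    assert solution(10) == 27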
| 72
|
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))

    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when features are not compatible with SQLAlchemy 2.0
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 72
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    '''simple docstring'''

    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-3
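# A hedged instantiation sketch (illustrative values):
if __name__ == "__main__":
    config = ResNetConfig(depths=[3, 4, 6, 3])
    print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']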
| 31
|
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    '''simple docstring'''

    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Runs the doc tests for all files in `directory` matching the given identifiers."""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_examples(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| 346
| 0
|
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
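# A hedged usage sketch (illustrative; this is the machinery behind
# `Dataset.from_generator`):
if __name__ == "__main__":
    def gen():
        for i in range(3):
            yield {"id": i, "text": f"example {i}"}

    ds = GeneratorDatasetInputStream(generator=gen).read()
    print(len(ds))  # 3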
| 351
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False) -> None:
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")
    return name
def convert_state_dict(orig_state_dict: dict, model: YolosForObjectDetection) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img() -> torch.Tensor:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(
    yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
) -> None:
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
[[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]])
        expected_slice_boxes = torch.tensor(
[[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]])
elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]])
        expected_slice_boxes = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]])
elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
[[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]])
        expected_slice_boxes = torch.tensor(
[[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]])
elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
[[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]])
        expected_slice_boxes = torch.tensor(
[[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]])
elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
[[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]])
        expected_slice_boxes = torch.tensor(
[[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]])
else:
raise ValueError(F"""Unknown yolos_name: {yolos_name}""")
    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
UpperCAmelCase = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 172
| 0
|
"""simple docstring"""
from math import factorial
def solution(num: int = 100) -> int:
    """Returns the sum of the digits in the number num!."""
    return sum(int(x) for x in str(factorial(num)))
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 136
|
class Graph:
    """Undirected weighted graph stored as an adjacency map, with a Boruvka
    minimum-spanning-tree implementation (assumes a connected graph)."""

    def __init__(self) -> None:
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex) -> None:
        """Add a vertex to the graph if it is not already present."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight) -> None:
        """Add an undirected weighted edge between head and tail."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self) -> None:
        """Make every edge weight distinct so the MST is unique."""
        edges = self.get_edges()
        # keep only one orientation of each undirected edge
        seen = set()
        unique_edges = []
        for head, tail, weight in edges:
            if (tail, head) not in seen:
                seen.add((head, tail))
                unique_edges.append([head, tail, weight])
        unique_edges.sort(key=lambda e: e[2])
        # bump equal weights so the sequence is strictly increasing
        for i in range(len(unique_edges) - 1):
            if unique_edges[i][2] >= unique_edges[i + 1][2]:
                unique_edges[i + 1][2] = unique_edges[i][2] + 1
        for head, tail, weight in unique_edges:
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self) -> str:
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self) -> list:
        """Return all edges as (tail, head, weight) tuples (both orientations)."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None) -> "Graph":
        """Build a graph from iterables of vertices and (head, tail, weight) edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set forest with path compression and union by rank."""

        def __init__(self) -> None:
            self.parent = {}
            self.rank = {}

        def __len__(self) -> int:
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            # equal ranks: attach root2 under root1 and bump root1's rank
            self.rank[root1] += 1
            self.parent[root2] = root1
            return root1

    @staticmethod
    def boruvka_mst(graph) -> "Graph":
        """Return the minimum spanning tree of graph via Boruvka's algorithm."""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            # consider each undirected edge once
            edges = []
            seen = set()
            for head, tail, weight in graph.get_edges():
                if (tail, head) not in seen:
                    seen.add((head, tail))
                    edges.append((head, tail, weight))
            for head, tail, weight in edges:
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components -= 1
        return Graph.build(edges=mst_edges)
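# Minimal usage sketch for the classes above: build a small weighted graph,
# make the weights distinct, and print its Boruvka minimum spanning tree.
if __name__ == "__main__":
    g = Graph.build(
        vertices=[1, 2, 3, 4],
        edges=[(1, 2, 1), (2, 3, 2), (3, 4, 1), (1, 4, 3)],
    )
    g.distinct_weight()
    print(Graph.boruvka_mst(g))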
import argparse
import os
import re

import tensorflow as tf
import torch

from transformers import BertConfig, BertModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")

    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads."
        )

    # convert layers
    logger.info("Converting weights...")
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output LayerNorm
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm (note: shadowed by the identical branch above)
                trace.append("output")
                pointer = getattr(pointer, "output")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}"
            )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model


def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    # Instantiate model
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tf2_weights_in_bert(model, tf_checkpoint_path, config)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model (must include filename).",
    )
    args = parser.parse_args()
    convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
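# Example invocation of this script (script filename and paths are placeholders):
#   python convert_bert_tf2_checkpoint.py \
#       --tf_checkpoint_path ./tf2_model \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin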
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool


if is_datasets_available():
    from datasets import load_dataset


class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
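# Minimal usage sketch (assumes the surrounding `transformers` tools runtime and
# network access to the checkpoints named above):
#
#   tool = TextToSpeechTool()
#   audio = tool("Hello, this text will be read out loud.")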
from typing import Callable, List, Optional, Union

import PIL
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPSegForImageSegmentation,
    CLIPSegProcessor,
    CLIPTextModel,
    CLIPTokenizer,
)

from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class TextInpainting(DiffusionPipeline):
    """Pipeline that segments the region described by `text` with CLIPSeg and
    then inpaints it with Stable Diffusion using `prompt`."""

    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # Predict a mask for `text` in `image` with CLIPSeg
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
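# Minimal usage sketch (model ids below are the commonly used public checkpoints
# and are assumptions here; loading them needs network access):
#
#   from diffusers import DiffusionPipeline
#   from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-inpainting",
#       custom_pipeline="text_inpainting",
#       segmentation_model=CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined"),
#       segmentation_processor=CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined"),
#   )
#   result = pipe(image=init_image, text="a glass", prompt="a cup of coffee").images[0]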
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
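# Not part of the original test module; added so the file can be run directly.
if __name__ == "__main__":
    unittest.main()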
import qiskit


def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    """Entangle `qubits` qubits with a Hadamard gate followed by a CNOT chain
    and return the measurement counts from the Aer simulator."""
    classical_bits = qubits

    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)

    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {quantum_entanglement(3)}")
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import List, Optional
class __snake_case ( _lowercase):
def __init__( self : str ):
"""simple docstring"""
self.test()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : Optional[int] = False
while not completed:
if counter == 1:
self.reset()
_lowerCamelCase : Optional[int] = self.advance()
if not self.does_advance(__lowerCAmelCase ):
raise Exception(
'''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = self.update(__lowerCAmelCase )
counter += 1
if counter > 1_0_0_0_0:
raise Exception('''update() does not fulfill the constraint.''' )
if self.remaining() != 0:
raise Exception('''Custom Constraint is not defined correctly.''' )
@abstractmethod
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : int ):
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : int ):
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : Tuple=False ):
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class __snake_case ( _lowercase):
def __init__( self : Optional[int] , __lowerCAmelCase : List[int] ):
"""simple docstring"""
super(__lowerCAmelCase , self ).__init__()
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or len(__lowerCAmelCase ) == 0:
raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
if any((not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or token_id < 0) for token_id in token_ids ):
raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
_lowerCamelCase : Union[str, Any] = token_ids
_lowerCamelCase : Dict = len(self.token_ids )
_lowerCamelCase : List[str] = -1 # the index of the currently fulfilled step
_lowerCamelCase : List[Any] = False
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : int ):
"""simple docstring"""
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(__lowerCAmelCase )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : int ):
"""simple docstring"""
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(__lowerCAmelCase )}''' )
_lowerCamelCase : List[str] = False
_lowerCamelCase : Union[str, Any] = False
_lowerCamelCase : Optional[int] = False
if self.does_advance(__lowerCAmelCase ):
self.fulfilled_idx += 1
_lowerCamelCase : List[Any] = True
if self.fulfilled_idx == (self.seqlen - 1):
_lowerCamelCase : int = True
_lowerCamelCase : Optional[Any] = completed
else:
# failed to make progress.
_lowerCamelCase : str = True
self.reset()
return stepped, completed, reset
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : str = False
_lowerCamelCase : Tuple = 0
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
return self.seqlen - (self.fulfilled_idx + 1)
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Dict=False ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = PhrasalConstraint(self.token_ids )
if stateful:
_lowerCamelCase : Any = self.seqlen
_lowerCamelCase : List[Any] = self.fulfilled_idx
_lowerCamelCase : Dict = self.completed
return new_constraint
class __snake_case :
def __init__( self : Dict , __lowerCAmelCase : List[List[int]] , __lowerCAmelCase : Dict=True ):
"""simple docstring"""
_lowerCamelCase : Any = max([len(__lowerCAmelCase ) for one in nested_token_ids] )
_lowerCamelCase : Any = {}
for token_ids in nested_token_ids:
_lowerCamelCase : Dict = root
for tidx, token_id in enumerate(__lowerCAmelCase ):
if token_id not in level:
_lowerCamelCase : List[Any] = {}
_lowerCamelCase : Union[str, Any] = level[token_id]
if no_subsets and self.has_subsets(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError(
'''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
f''' {nested_token_ids}.''' )
_lowerCamelCase : Dict = root
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : List[Any] = self.trie
for current_token in current_seq:
_lowerCamelCase : str = start[current_token]
_lowerCamelCase : Any = list(start.keys() )
return next_tokens
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = self.next_tokens(__lowerCAmelCase )
return len(__lowerCAmelCase ) == 0
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : Dict = list(root.values() )
if len(__lowerCAmelCase ) == 0:
return 1
else:
return sum([self.count_leaves(__lowerCAmelCase ) for nn in next_nodes] )
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : int , __lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.count_leaves(__lowerCAmelCase )
return len(__lowerCAmelCase ) != leaf_count
class __snake_case ( _lowercase):
def __init__( self : Dict , __lowerCAmelCase : List[List[int]] ):
"""simple docstring"""
super(__lowerCAmelCase , self ).__init__()
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or len(__lowerCAmelCase ) == 0:
raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
if any(not isinstance(__lowerCAmelCase , __lowerCAmelCase ) for token_ids in nested_token_ids ):
raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
if any(
any((not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
_lowerCamelCase : str = DisjunctiveTrie(__lowerCAmelCase )
_lowerCamelCase : Any = nested_token_ids
_lowerCamelCase : List[Any] = self.trie.max_height
_lowerCamelCase : Optional[int] = []
_lowerCamelCase : List[str] = False
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Any = self.trie.next_tokens(self.current_seq )
if len(__lowerCAmelCase ) == 0:
return None
else:
return token_list
def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : int ):
"""simple docstring"""
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(__lowerCAmelCase )}''' )
_lowerCamelCase : int = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : int ):
"""simple docstring"""
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(__lowerCAmelCase )}''' )
_lowerCamelCase : int = False
_lowerCamelCase : str = False
_lowerCamelCase : Optional[Any] = False
if self.does_advance(__lowerCAmelCase ):
self.current_seq.append(__lowerCAmelCase )
_lowerCamelCase : List[str] = True
else:
_lowerCamelCase : List[Any] = True
self.reset()
_lowerCamelCase : List[str] = self.trie.reached_leaf(self.current_seq )
_lowerCamelCase : int = completed
return stepped, completed, reset
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = False
_lowerCamelCase : List[str] = []
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : str=False ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = DisjunctiveConstraint(self.token_ids )
if stateful:
_lowerCamelCase : int = self.seqlen
_lowerCamelCase : List[str] = self.current_seq
_lowerCamelCase : Union[str, Any] = self.completed
return new_constraint
class __snake_case :
def __init__( self : str , __lowerCAmelCase : List[Constraint] ):
"""simple docstring"""
_lowerCamelCase : Any = constraints
# max # of steps required to fulfill a given constraint
_lowerCamelCase : str = max([c.seqlen for c in constraints] )
_lowerCamelCase : List[Any] = len(__lowerCAmelCase )
_lowerCamelCase : str = False
self.init_state()
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : List[str] = None
_lowerCamelCase : Any = [constraint.copy(stateful=__lowerCAmelCase ) for constraint in self.constraints]
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : str = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
_lowerCamelCase : Union[str, Any] = constraint.advance()
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
token_list.append(__lowerCAmelCase )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
token_list.extend(__lowerCAmelCase )
else:
_lowerCamelCase : Any = self.inprogress_constraint.advance()
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
token_list.append(__lowerCAmelCase )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
token_list.extend(__lowerCAmelCase )
if len(__lowerCAmelCase ) == 0:
return None
else:
return token_list
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Optional[List[int]] ):
"""simple docstring"""
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
_lowerCamelCase , _lowerCamelCase : str = self.add(__lowerCAmelCase )
# the entire list of constraints are fulfilled
if self.completed:
break
def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : int ):
"""simple docstring"""
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' )
_lowerCamelCase , _lowerCamelCase : List[str] = False, False
if self.completed:
_lowerCamelCase : int = True
_lowerCamelCase : Optional[Any] = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple = self.inprogress_constraint.update(__lowerCAmelCase )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=__lowerCAmelCase ) )
_lowerCamelCase : List[str] = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
_lowerCamelCase : str = None
if len(self.pending_constraints ) == 0:
# we're done!
_lowerCamelCase : Any = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(__lowerCAmelCase ):
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Any = pending_constraint.update(__lowerCAmelCase )
if not stepped:
raise Exception(
'''`constraint.update(token_id)` is not yielding incremental progress, '''
'''even though `constraint.does_advance(token_id)` is true.''' )
if complete:
self.complete_constraints.append(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = None
if not complete and stepped:
_lowerCamelCase : Union[str, Any] = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
_lowerCamelCase : Union[str, Any] = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
_lowerCamelCase : List[Any] = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Dict=True ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ConstraintListState(self.constraints ) # we actually never though self.constraints objects
# throughout this process. So it's at initialization state.
if stateful:
_lowerCamelCase : Optional[Any] = [
constraint.copy(stateful=__lowerCAmelCase ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
_lowerCamelCase : List[str] = self.inprogress_constraint.copy(stateful=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = [constraint.copy() for constraint in self.pending_constraints]
return new_state
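# Minimal usage sketch for the classes above: step a PhrasalConstraint through
# its token ids one at a time until it reports completion.
if __name__ == "__main__":
    constraint = PhrasalConstraint([5, 6, 7])
    for token in [5, 6, 7]:
        stepped, completed, reset = constraint.update(token)
    print(completed)  # True once the full phrase has been generated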
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
# TODO: upload to AWS
lowerCAmelCase__ = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class __snake_case ( _lowercase):
snake_case__ : int = "retribert"
def __init__( self : Optional[int] , __lowerCAmelCase : str=3_0_5_2_2 , __lowerCAmelCase : Tuple=7_6_8 , __lowerCAmelCase : Union[str, Any]=8 , __lowerCAmelCase : Any=1_2 , __lowerCAmelCase : Optional[int]=3_0_7_2 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=5_1_2 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Optional[Any]=1E-12 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Any=1_2_8 , __lowerCAmelCase : Optional[int]=0 , **__lowerCAmelCase : str , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase )
_lowerCamelCase : Dict = vocab_size
_lowerCamelCase : Union[str, Any] = hidden_size
_lowerCamelCase : Dict = num_hidden_layers
_lowerCamelCase : int = num_attention_heads
_lowerCamelCase : int = hidden_act
_lowerCamelCase : str = intermediate_size
_lowerCamelCase : Union[str, Any] = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Optional[int] = max_position_embeddings
_lowerCamelCase : List[Any] = type_vocab_size
_lowerCamelCase : Any = initializer_range
_lowerCamelCase : Optional[int] = layer_norm_eps
_lowerCamelCase : int = share_encoders
_lowerCamelCase : Optional[Any] = projection_dim
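# Usage note: `RetriBertConfig()` builds the default configuration, e.g.
# `RetriBertConfig().hidden_size == 768` and `RetriBertConfig().model_type == "retribert"`.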
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms

from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name


def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )

    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
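# Example invocation (script filename assumed; converts and saves the checkpoint locally):
#   python convert_focalnet_checkpoint.py --model_name focalnet-tiny --pytorch_dump_folder_path ./focalnet-tiny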
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowerCamelCase__ ( a , a=0.999 , a="cosine" , ) -> int:
if alpha_transform_type == "cosine":
def alpha_bar_fn(a ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(a ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
_A: Dict = []
for i in range(a ):
_A: Optional[int] = i / num_diffusion_timesteps
_A: Optional[int] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(a ) / alpha_bar_fn(a ) , a ) )
return torch.tensor(a , dtype=torch.floataa )
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = [e.name for e in KarrasDiffusionSchedulers]
__UpperCamelCase : Tuple = 2
@register_to_config
def __init__( self : str , lowerCAmelCase_ : int = 1_0_0_0 , lowerCAmelCase_ : float = 0.00085 , lowerCAmelCase_ : float = 0.012 , lowerCAmelCase_ : str = "linear" , lowerCAmelCase_ : Optional[Union[np.ndarray, List[float]]] = None , lowerCAmelCase_ : str = "epsilon" , lowerCAmelCase_ : Optional[bool] = False , lowerCAmelCase_ : Optional[bool] = False , lowerCAmelCase_ : float = 1.0 , lowerCAmelCase_ : str = "linspace" , lowerCAmelCase_ : int = 0 , ):
"""simple docstring"""
if trained_betas is not None:
_A: Optional[Any] = torch.tensor(lowerCAmelCase_ , dtype=torch.floataa )
elif beta_schedule == "linear":
_A: List[str] = torch.linspace(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_A: Optional[Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCAmelCase_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_A: Tuple = betas_for_alpha_bar(lowerCAmelCase_ , alpha_transform_type='''cosine''' )
elif beta_schedule == "exp":
_A: int = betas_for_alpha_bar(lowerCAmelCase_ , alpha_transform_type='''exp''' )
else:
raise NotImplementedError(F"""{beta_schedule} does is not implemented for {self.__class__}""" )
_A: Union[str, Any] = 1.0 - self.betas
_A: Dict = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_A: str = use_karras_sigmas
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int]=None ):
"""simple docstring"""
if schedule_timesteps is None:
_A: List[str] = self.timesteps
_A: int = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_A: Optional[int] = 1 if len(lowerCAmelCase_ ) > 1 else 0
else:
_A: int = timestep.cpu().item() if torch.is_tensor(lowerCAmelCase_ ) else timestep
_A: List[str] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __magic_name__ ( self : int ):
"""simple docstring"""
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __magic_name__ ( self : List[str] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : Union[float, torch.FloatTensor] , ):
"""simple docstring"""
_A: List[str] = self.index_for_timestep(lowerCAmelCase_ )
_A: str = self.sigmas[step_index]
_A: str = sample / ((sigma**2 + 1) ** 0.5)
return sample
    def set_timesteps( self , num_inference_steps : int , device : Union[str, torch.device] = None , num_train_timesteps : Optional[int] = None , ):
        """simple docstring"""
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0 , num_train_timesteps - 1 , num_inference_steps , dtype=float )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round()[::-1].copy().astype(float )
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps , 0 , -step_ratio )).round().copy().astype(float )
            timesteps -= 1
        else:
            raise ValueError(
                F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
        log_sigmas = np.log(sigmas )
        sigmas = np.interp(timesteps , np.arange(0 , len(sigmas ) ) , sigmas )
        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas , num_inference_steps=self.num_inference_steps )
            timesteps = np.array([self._sigma_to_t(sigma , log_sigmas ) for sigma in sigmas] )
        sigmas = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
        sigmas = torch.from_numpy(sigmas ).to(device=device )
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
        timesteps = torch.from_numpy(timesteps )
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
        if str(device ).startswith('''mps''' ):
            # mps does not support float64
            self.timesteps = timesteps.to(device , dtype=torch.float32 )
        else:
            self.timesteps = timesteps.to(device=device )
        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int )
    def _sigma_to_t( self , sigma , log_sigmas ):
        """simple docstring"""
        # get log sigma
        log_sigma = np.log(sigma )
        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]
        # get sigmas range
        low_idx = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
        high_idx = low_idx + 1
        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w , 0 , 1 )
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape )
        return t
    def _convert_to_karras( self , in_sigmas : torch.FloatTensor , num_inference_steps ):
        """simple docstring"""
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()
        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0 , 1 , num_inference_steps )
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
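
    # Worked example (illustrative numbers): with sigma_min=0.03, sigma_max=14.6,
    # num_inference_steps=5 and rho=7, the schedule above interpolates linearly
    # in sigma**(1/rho) and comes out roughly [14.6, 4.8, 1.3, 0.25, 0.03] --
    # steps cluster toward the low-noise end, as proposed by Karras et al. (2022).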
@property
    def state_in_first_order( self ):
        """simple docstring"""
        return self.dt is None
    def step( self , model_output : Union[torch.FloatTensor, np.ndarray] , timestep : Union[float, torch.FloatTensor] , sample : Union[torch.FloatTensor, np.ndarray] , return_dict : bool = True , ):
        """simple docstring"""
        step_index = self.index_for_timestep(timestep )
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`""" )
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range , self.config.clip_sample_range )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2
            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample
            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def add_noise( self , original_samples : torch.FloatTensor , noise : torch.FloatTensor , timesteps : torch.FloatTensor , ):
        """simple docstring"""
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps ):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device , dtype=torch.float32 )
            timesteps = timesteps.to(original_samples.device , dtype=torch.float32 )
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device )
            timesteps = timesteps.to(original_samples.device )
        step_indices = [self.index_for_timestep(t , schedule_timesteps ) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape ) < len(original_samples.shape ):
            sigma = sigma.unsqueeze(-1 )
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
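
    # Intuition: `add_noise` implements the variance-exploding forward process
    # x_t = x_0 + sigma_t * eps, i.e. standard Gaussian noise scaled by the
    # sigma that corresponds to each requested timestep.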
def __len__( self : Dict ):
"""simple docstring"""
return self.config.num_train_timesteps
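

# For intuition, the two branches of `step` above are the two stages of Heun's
# method applied to the ODE dx/dsigma = (x - x0(x, sigma)) / sigma. A minimal
# standalone sketch of the generic predictor-corrector update (illustrative
# helper, not part of the scheduler's API):
def _heun_step_sketch(x, sigma, sigma_next, d_fn):
    d1 = d_fn(x, sigma)  # derivative at the current point (Euler predictor)
    x_pred = x + d1 * (sigma_next - sigma)
    d2 = d_fn(x_pred, sigma_next)  # derivative at the predicted point
    return x + 0.5 * (d1 + d2) * (sigma_next - sigma)  # averaged corrector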
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"
# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 10_24, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
"google/mobilenet_v1_1.0_224",
"google/mobilenet_v1_0.75_192",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map( model , config , tf_weights=None ):
    '''simple docstring'''
    tf_to_pt_map = {}
    if isinstance(model , MobileNetVaForImageClassification ):
        backbone = model.mobilenet_va
    else:
        backbone = model
    prefix = 'MobilenetV1/Conv2d_0/'
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var
    for i in range(13 ):
        tf_index = i + 1
        pt_index = i * 2
        pointer = backbone.layer[pt_index]
        prefix = f"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var
        pointer = backbone.layer[pt_index + 1]
        prefix = f"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var
    if isinstance(model , MobileNetVaForImageClassification ):
        prefix = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias
    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va( model , config , tf_checkpoint_path ):
    '''simple docstring'''
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions." )
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path )
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"""Loading TF weight {name} with shape {shape}""" )
        array = tf.train.load_variable(tf_checkpoint_path , name )
        tf_weights[name] = array
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model , config , tf_weights )
    for name, pointer in tf_to_pt_map.items():
        logger.info(f"""Importing {name}""" )
        if name not in tf_weights:
            logger.info(f"""{name} not in tf pre-trained weights, skipping""" )
            continue
        array = tf_weights[name]
        if "depthwise_weights" in name:
            logger.info("Transposing depthwise" )
            array = np.transpose(array , (2, 3, 0, 1) )
        elif "weights" in name:
            logger.info("Transposing" )
            if len(pointer.shape ) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array , (3, 2, 0, 1) )
        if pointer.shape != array.shape:
            raise ValueError(f"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" )
        logger.info(f"""Initialize PyTorch weight {name} {array.shape}""" )
        pointer.data = torch.from_numpy(array )
        tf_weights.pop(name , None )
        tf_weights.pop(name + "/RMSProp" , None )
        tf_weights.pop(name + "/RMSProp_1" , None )
        tf_weights.pop(name + "/ExponentialMovingAverage" , None )
    logger.info(f"""Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}""" )
    return model
def apply_tf_padding( features , conv_layer ):
    '''simple docstring'''
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height , 0 )
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height) , 0 )
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width , 0 )
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width) , 0 )
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features , padding , "constant" , 0.0 )
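

# Worked example of the TF "SAME" padding rule above (illustrative): for a
# 224x224 input with stride 2 and kernel 3, in_height % stride == 0, so
# pad_along_height = max(3 - 2, 0) = 1, giving pad_top=0 and pad_bottom=1.
# Unlike PyTorch's symmetric padding, TF pads the extra pixel on the
# bottom/right, which is why this helper is needed for checkpoint parity.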
class MobileNetVaConvLayer( nn.Module ):
    '''simple docstring'''
    def __init__( self , config , in_channels , out_channels , kernel_size , stride = 1 , groups = 1 , bias = False , use_normalization = True , use_activation = True , ):
        """simple docstring"""
        super().__init__()
        self.config = config
        if in_channels % groups != 0:
            raise ValueError(F"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
        if out_channels % groups != 0:
            raise ValueError(F"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
        self.convolution = nn.Conv2d(
            in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , stride=stride , padding=padding , groups=groups , bias=bias , padding_mode="zeros" , )
        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels , eps=config.layer_norm_eps , momentum=0.9997 , affine=True , track_running_stats=True , )
        else:
            self.normalization = None
        if use_activation:
            if isinstance(use_activation , str ):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act , str ):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None
    def forward( self , features ):
        """simple docstring"""
        if self.config.tf_padding:
            features = apply_tf_padding(features , self.convolution )
        features = self.convolution(features )
        if self.normalization is not None:
            features = self.normalization(features )
        if self.activation is not None:
            features = self.activation(features )
        return features
class MobileNetVaPreTrainedModel( PreTrainedModel ):
    '''simple docstring'''

    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = """mobilenet_v1"""
    main_input_name = """pixel_values"""
    supports_gradient_checkpointing = False
    def _init_weights( self , module ):
        """simple docstring"""
        if isinstance(module , (nn.Linear, nn.Conv2d) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module , nn.BatchNorm2d ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )
_snake_case = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_snake_case = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." , MOBILENET_V1_START_DOCSTRING , )
class MobileNetVaModel( MobileNetVaPreTrainedModel ):
'''simple docstring'''
    def __init__( self , config , add_pooling_layer = True ):
        """simple docstring"""
        super().__init__(config )
        self.config = config
        depth = 32
        out_channels = max(int(depth * config.depth_multiplier ) , config.min_depth )
        self.conv_stem = MobileNetVaConvLayer(
            config , in_channels=config.num_channels , out_channels=out_channels , kernel_size=3 , stride=2 , )
        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        self.layer = nn.ModuleList()
        for i in range(13 ):
            in_channels = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier ) , config.min_depth )
            self.layer.append(
                MobileNetVaConvLayer(
                    config , in_channels=in_channels , out_channels=in_channels , kernel_size=3 , stride=strides[i] , groups=in_channels , ) )
            self.layer.append(
                MobileNetVaConvLayer(
                    config , in_channels=in_channels , out_channels=out_channels , kernel_size=1 , ) )
        self.pooler = nn.AdaptiveAvgPool2d((1, 1) ) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()
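
    # Note on the loop above: each block is a depthwise-separable convolution,
    # i.e. a 3x3 depthwise conv (groups == channels) followed by a 1x1 pointwise
    # conv. For kernel size k this costs roughly k^2*C_in + C_in*C_out multiplies
    # per position instead of k^2*C_in*C_out, about 8-9x fewer operations at k=3.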
    def _prune_heads( self , heads_to_prune ):
        """simple docstring"""
        raise NotImplementedError
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward( self , pixel_values = None , output_hidden_states = None , return_dict = None , ):
        """simple docstring"""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values" )
        hidden_states = self.conv_stem(pixel_values )
        all_hidden_states = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer ):
            hidden_states = layer_module(hidden_states )
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        last_hidden_state = hidden_states
        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state ) , start_dim=1 )
        else:
            pooled_output = None
        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=all_hidden_states , )
@add_start_docstrings(
    "\n    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    " , MOBILENET_V1_START_DOCSTRING , )
class MobileNetVaForImageClassification( MobileNetVaPreTrainedModel ):
'''simple docstring'''
    def __init__( self , config ):
        """simple docstring"""
        super().__init__(config )
        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config )
        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels
        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob , inplace=True )
        self.classifier = nn.Linear(last_hidden_size , config.num_labels ) if config.num_labels > 0 else nn.Identity()
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward( self , pixel_values = None , output_hidden_states = None , labels = None , return_dict = None , ):
        """simple docstring"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mobilenet_va(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(self.dropout(pooled_output ) )
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(
            loss=loss , logits=logits , hidden_states=outputs.hidden_states , )
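
# Example (illustrative): with config.num_labels=3 and integer class labels,
# problem_type is inferred as "single_label_classification" and
# CrossEntropyLoss is used; float multi-hot labels of shape (batch, 3) would
# instead select BCEWithLogitsLoss.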
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
_a : Optional[int]= False
_a : int= False
def __UpperCAmelCase ( UpperCAmelCase_ : Namespace ) -> Optional[Any]:
'''simple docstring'''
return TrainCommand(UpperCAmelCase_ )
class UpperCamelCase ( lowercase ):
@staticmethod
def _lowercase (_A : ArgumentParser) -> Any:
__snake_case : Any = parser.add_parser('train' , help='CLI tool to train a model on a task.')
train_parser.add_argument(
'--train_data' , type=_A , required=_A , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=_A , default=0 , help='Column of the dataset csv file with example labels.')
train_parser.add_argument(
'--column_text' , type=_A , default=1 , help='Column of the dataset csv file with example texts.')
train_parser.add_argument(
'--column_id' , type=_A , default=2 , help='Column of the dataset csv file with example ids.')
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).')
train_parser.add_argument('--validation_data' , type=_A , default='' , help='path to validation dataset.')
train_parser.add_argument(
'--validation_split' , type=_A , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=_A , default='./' , help='path to saved the trained model.')
train_parser.add_argument(
'--task' , type=_A , default='text_classification' , help='Task to train the model on.')
train_parser.add_argument(
'--model' , type=_A , default='bert-base-uncased' , help='Model\'s name or path to stored model.')
train_parser.add_argument('--train_batch_size' , type=_A , default=32 , help='Batch size for training.')
train_parser.add_argument('--valid_batch_size' , type=_A , default=64 , help='Batch size for validation.')
train_parser.add_argument('--learning_rate' , type=_A , default=3E-5 , help='Learning rate.')
train_parser.add_argument('--adam_epsilon' , type=_A , default=1E-08 , help='Epsilon for Adam optimizer.')
        train_parser.set_defaults(func=train_command_factory)
def __init__(self : int , _A : Namespace) -> Tuple:
__snake_case : Optional[int] = logging.get_logger('transformers-cli/training')
__snake_case : Optional[int] = 'tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=_A)
__snake_case : List[Any] = args.output
__snake_case : Any = args.column_label
__snake_case : str = args.column_text
__snake_case : Any = args.column_id
self.logger.info(f"Loading {args.task} pipeline for {args.model}")
if args.task == "text_classification":
__snake_case : List[str] = TextClassificationPipeline.from_pretrained(args.model)
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f"Loading dataset from {args.train_data}")
__snake_case : List[Any] = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
__snake_case : List[str] = None
if args.validation_data:
self.logger.info(f"Loading validation dataset from {args.validation_data}")
__snake_case : Dict = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
__snake_case : List[str] = args.validation_split
__snake_case : str = args.train_batch_size
__snake_case : Any = args.valid_batch_size
__snake_case : Union[str, Any] = args.learning_rate
__snake_case : str = args.adam_epsilon
def _lowercase (self : List[str]) -> str:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def _lowercase (self : str) -> int:
raise NotImplementedError
def _lowercase (self : Union[str, Any]) -> Optional[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output)
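

# Example invocation (illustrative; flags as registered above):
#   transformers-cli train --train_data train.csv --column_label 0 --column_text 1 \
#       --model bert-base-uncased --task text_classification --output ./out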
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ : Dict = logging.get_logger(__name__)
UpperCamelCase_ : Union[str, Any] = {
'''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
'''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
'''kssteven/ibert-roberta-large-mnli''': (
'''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
),
}
class IBertConfig( PretrainedConfig ):
    model_type = """ibert"""

    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , quant_mode=False , force_dequant="none" , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
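
# The mapping above tells the ONNX exporter which input axes are dynamic:
# batch size and sequence length vary at runtime, and the multiple-choice task
# adds a per-example choice axis as well.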
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ : List[Any] = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ : Optional[Any] = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ : Optional[int] = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ : Tuple = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
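
# The _LazyModule above defers the heavy torch/TF imports until one of the
# listed attributes (e.g. ConvBertModel) is first accessed, which keeps a bare
# `import transformers` fast even with many optional backends.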
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, """src""", """transformers""")

DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
class __snake_case ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[Any] = find_backend(""" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")""" )
self.assertIsNone(snake_case )
lowercase : List[str] = find_backend(""" if not is_tokenizers_available():""" )
self.assertEqual(snake_case ,"""tokenizers""" )
lowercase : List[Any] = find_backend(""" if not is_tensorflow_text_available():""" )
self.assertEqual(snake_case ,"""tensorflow_text""" )
lowercase : Any = find_backend(""" if not (is_sentencepiece_available() and is_tokenizers_available()):""" )
self.assertEqual(snake_case ,"""sentencepiece_and_tokenizers""" )
lowercase : Tuple = find_backend(
""" if not (is_sentencepiece_available() and is_tensorflow_text_available()):""" )
self.assertEqual(snake_case ,"""sentencepiece_and_tensorflow_text""" )
lowercase : Union[str, Any] = find_backend(
""" if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):""" )
self.assertEqual(snake_case ,"""sentencepiece_and_tokenizers_and_vision""" )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[Any] = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("""torch""" ,snake_case )
self.assertIn("""tensorflow_text""" ,snake_case )
self.assertIn("""sentencepiece_and_tokenizers""" ,snake_case )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""BertModel""" ,objects["""torch"""] )
self.assertIn("""TFBertModel""" ,objects["""tf"""] )
self.assertIn("""FlaxBertModel""" ,objects["""flax"""] )
self.assertIn("""BertModel""" ,objects["""torch"""] )
self.assertIn("""TFBertTokenizer""" ,objects["""tensorflow_text"""] )
self.assertIn("""convert_slow_tokenizer""" ,objects["""sentencepiece_and_tokenizers"""] )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Any = create_dummy_object("""CONSTANT""" ,"""'torch'""" )
self.assertEqual(snake_case ,"""\nCONSTANT = None\n""" )
lowercase : Any = create_dummy_object("""function""" ,"""'torch'""" )
self.assertEqual(
snake_case ,"""\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
lowercase : Any = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
"""
lowercase : int = create_dummy_object("""FakeClass""" ,"""'torch'""" )
self.assertEqual(snake_case ,snake_case )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : int = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
"""
lowercase : Union[str, Any] = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""] ,snake_case )
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Any:
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
lowercase : Union[str, Any] = tmp_path / """cache"""
lowercase : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowercase : Any = SqlDatasetReader(
"""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_sql_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@require_sqlalchemy
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Tuple:
lowercase : Union[str, Any] = tmp_path / """cache"""
lowercase : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
lowercase : str = features.copy() if features else default_expected_features
lowercase : Optional[Any] = (
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowercase : Optional[int] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_sql_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> List[Any]:
with contextlib.closing(sqlitea.connect(SCREAMING_SNAKE_CASE__ ) ) as con:
lowercase : Optional[int] = con.cursor()
cur.execute("""SELECT * FROM dataset""" )
for row in cur:
yield row
@require_sqlalchemy
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
lowercase : Any = tmp_path / """cache"""
lowercase : int = os.path.join(SCREAMING_SNAKE_CASE__ , """tmp.sql""" )
lowercase : List[str] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
SqlDatasetWriter(SCREAMING_SNAKE_CASE__ , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=1 ).write()
lowercase : List[str] = iter_sql_file(SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = iter_sql_file(SCREAMING_SNAKE_CASE__ )
for rowa, rowa in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
assert rowa == rowa
@require_sqlalchemy
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Dict:
lowercase : Dict = tmp_path / """cache"""
lowercase : List[str] = os.path.join(SCREAMING_SNAKE_CASE__ , """tmp.sql""" )
lowercase : List[str] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
SqlDatasetWriter(SCREAMING_SNAKE_CASE__ , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=2 ).write()
lowercase : Optional[int] = iter_sql_file(SCREAMING_SNAKE_CASE__ )
lowercase : List[Any] = iter_sql_file(SCREAMING_SNAKE_CASE__ )
for rowa, rowa in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
assert rowa == rowa
@require_sqlalchemy
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[str]:
lowercase : str = tmp_path / """cache"""
lowercase : Optional[int] = os.path.join(SCREAMING_SNAKE_CASE__ , """tmp.sql""" )
lowercase : Optional[Any] = SqlDatasetReader("""dataset""" , """sqlite:///""" + sqlite_path , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
SqlDatasetWriter(SCREAMING_SNAKE_CASE__ , """dataset""" , """sqlite:///""" + output_sqlite_path , num_proc=0 ).write()
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch '
            'helper utility that will spawn up '
            'multiple distributed processes'
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores', type=int, default=1, help='Number of TPU cores to use (1 or 8).' )
    # positional
    parser.add_argument(
        'training_script', type=str, help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ), )
    # rest from the training program
    parser.add_argument('training_script_args', nargs=REMAINDER )
    return parser.parse_args()


def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores )
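

# Example (illustrative): spawn a training script on 8 TPU cores:
#   python xla_spawn.py --num_cores 8 my_training_script.py --arg1 --arg2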
if __name__ == "__main__":
main()
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    """simple docstring"""

    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    # Build the sample tree:
    #        1
    #       / \
    #      2   3
    #     / \
    #    4   5
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def preorder( root: Node | None ):
    return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []


def postorder( root: Node | None ):
    return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []


def inorder( root: Node | None ):
    return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []


def height( root: Node | None ):
    return (max(height(root.left ), height(root.right ) ) + 1) if root else 0
def level_order( root: Node | None ):
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data )
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output
def get_nodes_from_left_to_right( root: Node | None, level: int ):
    output: list[Any] = []

    def populate_output(root: Node | None, level: int ) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left, level - 1 )
            populate_output(root.right, level - 1 )

    populate_output(root, level )
    return output


def get_nodes_from_right_to_left( root: Node | None, level: int ):
    output: list[Any] = []

    def populate_output(root: Node | None, level: int ) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.right, level - 1 )
            populate_output(root.left, level - 1 )

    populate_output(root, level )
    return output
def zigzag( root: Node | None ):
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root )
    for h in range(1, height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h ) )
            flag = 0
    return output
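

# For the sample tree built by make_tree(), zigzag() returns
# [[1], [3, 2], [4, 5]]: levels alternate left-to-right and right-to-left.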
def main():  # Main function for testing.
    tree = make_tree()
    print(F'''In-order Traversal: {inorder(tree )}''' )
    print(F'''Pre-order Traversal: {preorder(tree )}''' )
    print(F'''Post-order Traversal: {postorder(tree )}''', '\n' )
    print(F'''Height of Tree: {height(tree )}''', '\n' )
    print('Complete Level Order Traversal: ' )
    print(level_order(tree ), '\n' )
    print('Level-wise order Traversal: ' )
    for level in range(1, height(tree ) + 1 ):
        print(F'''Level {level}:''', get_nodes_from_left_to_right(tree, level=level ) )
    print('\nZigZag order Traversal: ' )
    print(zigzag(tree ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=13_37 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=13_37 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list( split_dict: SplitDict ):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list ) == len(split_dict )
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    'split_info' , [SplitInfo(), SplitInfo(dataset_name=None ), SplitInfo(dataset_name='my_dataset' )] )
def test_split_dict_asdict_has_dataset_name( split_info ):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({'train': split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class A_ :
'''simple docstring'''
    def __init__( self , parent , batch_size=2 , is_training=True , use_auxiliary_loss=False , num_queries=10 , num_channels=3 , min_size=32 * 4 , max_size=32 * 6 , num_labels=4 , mask_feature_size=32 , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size
def UpperCAmelCase_ ( self : int ) -> int:
UpperCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowercase_ )
UpperCAmelCase : Tuple = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowercase_ )
UpperCAmelCase : str = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowercase_ ) > 0.5
).float()
UpperCAmelCase : Optional[Any] = (torch.rand((self.batch_size, self.num_labels) , device=lowercase_ ) > 0.5).long()
UpperCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def UpperCAmelCase_ ( self : Dict ) -> Dict:
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
        return config, inputs_dict
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Tuple ) -> int:
UpperCAmelCase : int = output.encoder_hidden_states
UpperCAmelCase : Any = output.pixel_decoder_hidden_states
UpperCAmelCase : int = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowercase_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowercase_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowercase_ ) , config.decoder_config.decoder_layers )
def UpperCAmelCase_ ( self : List[str] , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : Dict=False ) -> Tuple:
with torch.no_grad():
UpperCAmelCase : str = MaskFormerModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase : List[str] = model(pixel_values=lowercase_ , pixel_mask=lowercase_ )
UpperCAmelCase : Union[str, Any] = model(lowercase_ , output_hidden_states=lowercase_ )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowercase_ , lowercase_ )
def UpperCAmelCase_ ( self : Dict , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : str ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = MaskFormerForInstanceSegmentation(config=lowercase_ )
model.to(lowercase_ )
model.eval()
def comm_check_on_output(lowercase_ : Union[str, Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCAmelCase : Optional[Any] = model(pixel_values=lowercase_ , pixel_mask=lowercase_ )
UpperCAmelCase : Dict = model(lowercase_ )
comm_check_on_output(lowercase_ )
UpperCAmelCase : Any = model(
pixel_values=lowercase_ , pixel_mask=lowercase_ , mask_labels=lowercase_ , class_labels=lowercase_ )
comm_check_on_output(lowercase_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class A_ ( _snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : str = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
UpperCAmelCase_ : Optional[Any] = (
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
UpperCAmelCase_ : int = False
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : List[str] = False
UpperCAmelCase_ : Tuple = False
def UpperCAmelCase_ ( self : Any ) -> int:
UpperCAmelCase : Optional[Any] = MaskFormerModelTester(self )
UpperCAmelCase : Optional[int] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )
def UpperCAmelCase_ ( self : str ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowercase_ , **lowercase_ , output_hidden_states=lowercase_ )
def UpperCAmelCase_ ( self : Any ) -> Any:
UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*lowercase_ )
@unittest.skip(reason='MaskFormer does not use inputs_embeds' )
def UpperCAmelCase_ ( self : Optional[int] ) -> str:
pass
@unittest.skip(reason='MaskFormer does not have a get_input_embeddings method' )
def UpperCAmelCase_ ( self : str ) -> List[str]:
pass
@unittest.skip(reason='MaskFormer is not a generative model' )
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
pass
@unittest.skip(reason='MaskFormer does not use token embeddings' )
def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def UpperCAmelCase_ ( self : int ) -> List[Any]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
pass
def UpperCAmelCase_ ( self : Dict ) -> List[Any]:
UpperCAmelCase , UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Tuple = model_class(lowercase_ )
UpperCAmelCase : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Optional[Any] = [*signature.parameters.keys()]
UpperCAmelCase : str = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_ )
@slow
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
for model_name in ["facebook/maskformer-swin-small-coco"]:
UpperCAmelCase : Tuple = MaskFormerModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
UpperCAmelCase : Optional[Any] = (self.model_tester.min_size,) * 2
UpperCAmelCase : str = {
'pixel_values': torch.randn((2, 3, *size) , device=lowercase_ ),
'mask_labels': torch.randn((2, 10, *size) , device=lowercase_ ),
'class_labels': torch.zeros(2 , 10 , device=lowercase_ ).long(),
}
UpperCAmelCase : List[str] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(lowercase_ )
UpperCAmelCase : Optional[int] = model(**lowercase_ )
self.assertTrue(outputs.loss is not None )
def UpperCAmelCase_ ( self : Dict ) -> str:
UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowercase_ , **lowercase_ , output_hidden_states=lowercase_ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[Any] = model_class(lowercase_ ).to(lowercase_ )
UpperCAmelCase : List[Any] = model(**lowercase_ , output_attentions=lowercase_ )
self.assertTrue(outputs.attentions is not None )
def UpperCAmelCase_ ( self : Dict ) -> str:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
UpperCAmelCase : Dict = self.all_model_classes[1]
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase : Any = model_class(lowercase_ )
model.to(lowercase_ )
model.train()
UpperCAmelCase : Tuple = model(lowercase_ , mask_labels=lowercase_ , class_labels=lowercase_ ).loss
loss.backward()
def UpperCAmelCase_ ( self : List[str] ) -> str:
# only MaskFormerForInstanceSegmentation has the loss
UpperCAmelCase : Optional[int] = self.all_model_classes[1]
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase : List[str] = True
UpperCAmelCase : Optional[Any] = True
UpperCAmelCase : List[Any] = model_class(lowercase_ )
model.to(lowercase_ )
model.train()
UpperCAmelCase : List[str] = model(lowercase_ , mask_labels=lowercase_ , class_labels=lowercase_ )
UpperCAmelCase : Tuple = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCAmelCase : Optional[int] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
UpperCAmelCase : Any = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCAmelCase : Tuple = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowercase_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowercase__ = 1e-4
def UpperCamelCase( ):
UpperCAmelCase : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class A_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
return (
MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco' )
if is_vision_available()
else None
)
def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
UpperCAmelCase : List[Any] = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco' ).to(lowercase_ )
UpperCAmelCase : Dict = self.default_image_processor
UpperCAmelCase : List[str] = prepare_img()
UpperCAmelCase : Union[str, Any] = image_processor(lowercase_ , return_tensors='pt' ).to(lowercase_ )
UpperCAmelCase : Optional[Any] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowercase_ , (1, 3, 800, 1_088) )
with torch.no_grad():
UpperCAmelCase : List[Any] = model(**lowercase_ )
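        # the 3x3 corner slices below are reference values for this checkpoint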
UpperCAmelCase : str = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(lowercase_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowercase_ , atol=lowercase_ ) )
UpperCAmelCase : Tuple = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(lowercase_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowercase_ , atol=lowercase_ ) )
UpperCAmelCase : Tuple = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(lowercase_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowercase_ , atol=lowercase_ ) )
def UpperCAmelCase_ ( self : List[str] ) -> int:
UpperCAmelCase : Optional[int] = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(lowercase_ )
.eval()
)
UpperCAmelCase : int = self.default_image_processor
UpperCAmelCase : Any = prepare_img()
UpperCAmelCase : List[Any] = image_processor(lowercase_ , return_tensors='pt' ).to(lowercase_ )
UpperCAmelCase : Union[str, Any] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowercase_ , (1, 3, 800, 1_088) )
with torch.no_grad():
UpperCAmelCase : Tuple = model(**lowercase_ )
# masks_queries_logits
UpperCAmelCase : Tuple = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
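        # mask logits are predicted at 1/4 of the input resolution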
UpperCAmelCase : Optional[int] = [
[-1.373_7124, -1.772_4937, -1.936_4233],
[-1.597_7281, -1.986_7939, -2.152_3695],
[-1.579_5398, -1.926_9832, -2.09_3942],
]
UpperCAmelCase : str = torch.tensor(lowercase_ ).to(lowercase_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowercase_ , atol=lowercase_ ) )
# class_queries_logits
UpperCAmelCase : Tuple = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
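        # num_labels + 1: the extra logit is the "no object" class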
UpperCAmelCase : Optional[Any] = torch.tensor(
[
[1.6512E00, -5.2572E00, -3.3519E00],
[3.6169E-02, -5.9025E00, -2.9313E00],
[1.0766E-04, -7.7630E00, -5.1263E00],
] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowercase_ , atol=lowercase_ ) )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
UpperCAmelCase : str = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff' )
.to(lowercase_ )
.eval()
)
UpperCAmelCase : str = self.default_image_processor
UpperCAmelCase : str = prepare_img()
UpperCAmelCase : Union[str, Any] = image_processor(lowercase_ , return_tensors='pt' ).to(lowercase_ )
UpperCAmelCase : str = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowercase_ , (1, 3, 800, 1_088) )
with torch.no_grad():
UpperCAmelCase : Tuple = model(**lowercase_ )
# masks_queries_logits
UpperCAmelCase : int = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
UpperCAmelCase : int = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
UpperCAmelCase : str = torch.tensor(lowercase_ ).to(lowercase_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowercase_ , atol=lowercase_ ) )
# class_queries_logits
UpperCAmelCase : Union[str, Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
UpperCAmelCase : Dict = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowercase_ , atol=lowercase_ ) )
def UpperCAmelCase_ ( self : Any ) -> Dict:
UpperCAmelCase : Any = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(lowercase_ )
.eval()
)
UpperCAmelCase : Union[str, Any] = self.default_image_processor
UpperCAmelCase : Optional[int] = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , )
UpperCAmelCase : Optional[int] = inputs['pixel_values'].to(lowercase_ )
UpperCAmelCase : Optional[Any] = [el.to(lowercase_ ) for el in inputs['mask_labels']]
UpperCAmelCase : List[str] = [el.to(lowercase_ ) for el in inputs['class_labels']]
with torch.no_grad():
UpperCAmelCase : Tuple = model(**lowercase_ )
self.assertTrue(outputs.loss is not None )
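        # even this synthetic batch (random masks, all-zero class labels) must yield a well-defined loss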
| 151
| 1
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __lowerCamelCase ( a__ , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[int] = TextToVideoSDPipeline
A_ : List[Any] = TEXT_TO_IMAGE_PARAMS
A_ : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
A_ : List[Any] = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
        unet = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=32 , attention_head_dim=4 , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
        vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        components = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
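    # the tiny UNet/VAE/CLIP stand-ins above keep this smoke test fast while still exercising the full text-to-video graph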
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ) -> Dict:
if str(__UpperCAmelCase ).startswith('''mps''' ):
            generator = torch.manual_seed(__UpperCAmelCase )
        else:
            generator = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''pt''',
}
return inputs
def _UpperCAmelCase ( self ) -> List[str]:
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['''output_type'''] = '''np'''
        frames = sd_pipe(**inputs ).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
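        # expected values come from a deterministic reference run (see enable_full_determinism() above)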
def _UpperCAmelCase ( self ) -> List[str]:
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def _UpperCAmelCase ( self ) -> str:
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False , expected_max_diff=1e-2 )
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def _UpperCAmelCase ( self ) -> str:
pass
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def _UpperCAmelCase ( self ) -> int:
pass
@unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
return super().test_progress_bar()
@slow
@skip_mps
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> List[Any]:
        expected_video = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''' )
        pipe = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe = pipe.to('''cuda''' )
        prompt = '''Spiderman is surfing'''
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        video_frames = pipe(prompt , generator=generator , num_inference_steps=25 , output_type='''pt''' ).frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2
def _UpperCAmelCase ( self ) -> str:
        expected_video = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''' )
        pipe = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
        pipe = pipe.to('''cuda''' )
        prompt = '''Spiderman is surfing'''
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        video_frames = pipe(prompt , generator=generator , num_inference_steps=2 , output_type='''pt''' ).frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2
| 353
|
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset( src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None ):
    """simple docstring"""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError('''run pip install datasets''' )
    pair = f'{src_lang}-{tgt_lang}'
    print(f'Converting {dataset}-{pair}' )
    ds = datasets.load_dataset(dataset, pair )
    if save_dir is None:
        save_dir = f'{dataset}-{pair}'
    save_dir = Path(save_dir )
    save_dir.mkdir(exist_ok=True )
    for split in ds.keys():
        print(f'Splitting {split} with {ds[split].num_rows} records' )
        # to save to val.source, val.target like summary datasets
        fn = '''val''' if split == '''validation''' else split
        src_path = save_dir.joinpath(f'{fn}.source' )
        tgt_path = save_dir.joinpath(f'{fn}.target' )
        src_fp = src_path.open('''w+''' )
        tgt_fp = tgt_path.open('''w+''' )
        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split] ):
            ex = x['''translation''']
            src_fp.write(ex[src_lang] + '''\n''' )
            tgt_fp.write(ex[tgt_lang] + '''\n''' )
    print(f'Saved {dataset} dataset to {save_dir}' )
if __name__ == "__main__":
    fire.Fire(download_wmt_dataset)
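# Example invocation via fire (file name hypothetical):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en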
| 153
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_ = {
'''configuration_speecht5''': [
'''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''',
'''SpeechT5Config''',
'''SpeechT5HifiGanConfig''',
],
'''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''],
'''processing_speecht5''': ['''SpeechT5Processor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ['''SpeechT5Tokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SpeechT5ForSpeechToText''',
'''SpeechT5ForSpeechToSpeech''',
'''SpeechT5ForTextToSpeech''',
'''SpeechT5Model''',
'''SpeechT5PreTrainedModel''',
'''SpeechT5HifiGan''',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 301
|
"""simple docstring"""
def partition( m ):
    memo = [[0 for _ in range(m )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        memo[i][0] = 1
    for n in range(m + 1 ):
        for k in range(1 , m ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
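# bottom-up DP: memo[n][k] accumulates counts over bounded part sizes (the classic integer-partition recurrence, with indices shifted by one); the answer is read from memo[m][m - 1]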
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input('''Enter a number: ''').strip())
            print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
            n = int(sys.argv[1])
            print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 301
| 1
|
from __future__ import annotations
import bisect
def bisect_left( sorted_collection, item, lo = 0, hi = -1 ) -> int:
    '''simple docstring'''
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right( sorted_collection, item, lo = 0, hi = -1 ) -> int:
    '''simple docstring'''
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left( sorted_collection, item, lo = 0, hi = -1 ) -> None:
    '''simple docstring'''
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi ), item )
def insort_right( sorted_collection, item, lo = 0, hi = -1 ) -> None:
    '''simple docstring'''
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi ), item )
def binary_search( sorted_collection, item ) -> int | None:
    '''simple docstring'''
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib( sorted_collection, item ) -> int | None:
    '''simple docstring'''
    index = bisect.bisect_left(sorted_collection, item )
    if index != len(sorted_collection ) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion( sorted_collection, item, left, right ) -> int | None:
    '''simple docstring'''
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1 )
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right )
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by comma:\n''').strip()
    collection = sorted(int(item) for item in user_input.split(''',''') )
    target = int(input('''Enter a single number to be found in the list:\n''') )
    result = binary_search(collection, target)
    if result is None:
        print(F'{target} was not found in {collection}.')
    else:
        print(F'{target} was found at position {result} in {collection}.')
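# bisect_left finds the leftmost insertion point and bisect_right the rightmost;
# the insort_* helpers use them to insert while keeping the collection sorted.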
| 323
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A : List[Any] = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : int = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Dict = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 323
| 1
|
edges = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
vertices = ['a', 'b', 'c', 'd', 'e']
def topological_sort( start, visited, sort ):
    """simple docstring"""
    current = start
    # add current to visited
    visited.append(current )
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor , visited , sort )
    # once all neighbors are visited, add current to sort
    sort.append(current )
    # if all vertices haven't been visited, select a new one to visit
    if len(visited ) != len(vertices ):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice , visited , sort )
    # return sort
    return sort
if __name__ == "__main__":
    sort = topological_sort('a', [], [])
    print(sort)
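# nodes are appended only after all of their descendants, so the returned list is in reverse topological order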
| 142
|
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    def __init__( self , initial_capacity : int = 6 ) ->None:
        self.front : Node | None = None
        self.rear : Node | None = None
        self.create_linked_list(initial_capacity )
    def create_linked_list( self , initial_capacity : int ) ->None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1 , initial_capacity ):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node
    def is_empty( self ) ->bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )
    def first( self ) ->Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None
    def enqueue( self , data : Any ) ->None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data
    def dequeue( self ) ->Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data
    def check_can_perform_operation( self ) ->None:
        if self.is_empty():
            raise Exception('''Empty Queue''' )
    def check_is_full( self ) ->None:
        if self.rear and self.rear.next == self.front:
            raise Exception('''Full Queue''' )
class Node:
    def __init__( self ) ->None:
        self.data : Any | None = None
        self.next : Node | None = None
        self.prev : Node | None = None
if __name__ == "__main__":
    import doctest
    doctest.testmod()
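# a fixed-capacity ring buffer: enqueue advances rear, dequeue advances front, and "full" is detected when rear.next wraps around to front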
| 142
| 1
|
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
UpperCamelCase : Union[str, Any] = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : str=7 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : Optional[Any]=1_8 , UpperCAmelCase_ : int=3_0 , UpperCAmelCase_ : int=4_0_0 , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Union[str, Any]=None , ):
"""simple docstring"""
a : Tuple = size if size is not None else {'''height''': 2_0, '''width''': 2_0}
a : Optional[Any] = parent
a : Optional[int] = batch_size
a : List[Any] = num_channels
a : List[Any] = image_size
a : int = min_resolution
a : Tuple = max_resolution
a : Tuple = size
a : Tuple = do_normalize
a : Dict = do_convert_rgb
a : List[str] = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6]
a : Any = patch_size if patch_size is not None else {'''height''': 1_6, '''width''': 1_6}
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : List[Any] = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
        raw_image = Image.open(requests.get(a , stream=True).raw).convert('RGB')
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class UpperCamelCase ( __lowercase , unittest.TestCase ):
"""simple docstring"""
A : List[Any] = PixaStructImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : Optional[int] = PixaStructImageProcessingTester(self)
@property
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_a , 'do_normalize'))
self.assertTrue(hasattr(_a , 'do_convert_rgb'))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : List[str] = self.image_processor_tester.prepare_dummy_image()
a : Optional[int] = self.image_processing_class(**self.image_processor_dict)
a : Any = 2_0_4_8
a : Dict = image_processor(_a , return_tensors='pt' , max_patches=_a)
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.06_06) , atol=1e-3 , rtol=1e-3))
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : List[str] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
a : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a)
for image in image_inputs:
self.assertIsInstance(_a , Image.Image)
# Test not batched input
a : Optional[Any] = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
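        # flattened patch width = patch height * patch width * channels, plus 2 extra columns for the row/column patch indices Pix2Struct prepends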
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
a : str = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_a).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a : Union[str, Any] = image_processor(
_a , return_tensors='pt' , max_patches=_a).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Optional[int] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a)
for image in image_inputs:
self.assertIsInstance(_a , Image.Image)
# Test not batched input
a : str = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
a : str = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(_a):
a : Dict = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_a).flattened_patches
a : Any = '''Hello'''
a : Optional[Any] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_a , header_text=_a).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a : int = image_processor(
_a , return_tensors='pt' , max_patches=_a , header_text=_a).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[str] = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
a : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a)
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray)
a : str = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
a : Optional[Any] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_a).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a : List[Any] = image_processor(
_a , return_tensors='pt' , max_patches=_a).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : int = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
a : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a)
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor)
# Test not batched input
a : List[str] = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
a : Any = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_a).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a : Any = image_processor(
_a , return_tensors='pt' , max_patches=_a).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class UpperCamelCase ( __lowercase , unittest.TestCase ):
"""simple docstring"""
A : List[str] = PixaStructImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Any = PixaStructImageProcessingTester(self , num_channels=4)
a : Tuple = 3
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : List[Any] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_a , 'do_normalize'))
self.assertTrue(hasattr(_a , 'do_convert_rgb'))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : List[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
a : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a)
for image in image_inputs:
self.assertIsInstance(_a , Image.Image)
# Test not batched input
a : List[str] = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
a : Dict = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=_a).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a : str = image_processor(
_a , return_tensors='pt' , max_patches=_a).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 365
|
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer( CLIPTokenizer ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        self.token_map = {}
    def try_adding_tokens( self , placeholder_token , *args , **kwargs ):
        """simple docstring"""
        num_added_tokens = super().add_tokens(placeholder_token , *args , **kwargs )
        if num_added_tokens == 0:
            raise ValueError(
                f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
                ' `placeholder_token` that is not already in the tokenizer.' )
    def add_placeholder_tokens( self , placeholder_token , *args , num_vec_per_token = 1 , **kwargs ):
        """simple docstring"""
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token , *args , **kwargs )
            output.append(placeholder_token )
        else:
            output = []
            for i in range(num_vec_per_token ):
                ith_token = placeholder_token + f"""_{i}"""
                self.try_adding_tokens(ith_token , *args , **kwargs )
                output.append(ith_token )
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"""The tokenizer already has placeholder token {token} that can get confused with"""
                    f""" {placeholder_token}; keep placeholder tokens independent.""" )
        self.token_map[placeholder_token] = output
    def replace_placeholder_tokens_in_text( self , text , vector_shuffle = False , prop_tokens_to_load = 1.0 ):
        """simple docstring"""
        if isinstance(text , list ):
            output = []
            for i in range(len(text ) ):
                output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=vector_shuffle ) )
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens ) * prop_tokens_to_load )]
                if vector_shuffle:
                    tokens = copy.copy(tokens )
                    random.shuffle(tokens )
                text = text.replace(placeholder_token , ' '.join(tokens ) )
        return text
    def __call__( self , text , *args , vector_shuffle = False , prop_tokens_to_load = 1.0 , **kwargs ):
        """simple docstring"""
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )
    def encode( self , text , *args , vector_shuffle = False , prop_tokens_to_load = 1.0 , **kwargs ):
        """simple docstring"""
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )
| 345
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCamelCase_ (unittest.TestCase ):
'''simple docstring'''
@property
    def dummy_uncond_unet( self ):
        torch.manual_seed(0 )
        model = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
        return model
    def _A ( self : int ):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet , scheduler=scheduler )
        sde_ve.to(torch_device )
        sde_ve.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=generator ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=generator , return_dict=False )[
            0
        ]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class lowerCamelCase_ (unittest.TestCase ):
'''simple docstring'''
    def _A ( self : int ):
        model_id = "google/ncsnpp-church-256"
        model = UNetaDModel.from_pretrained(model_id )
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id )
        sde_ve = ScoreSdeVePipeline(unet=model , scheduler=scheduler )
        sde_ve.to(torch_device )
        sde_ve.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = sde_ve(num_inference_steps=10 , output_type="numpy" , generator=generator ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 31
|
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node | None:
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def preorder( root ) -> list[int]:
    return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def postorder( root ) -> list[int]:
    return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def inorder( root ) -> list[int]:
    return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def height( root ) -> int:
    return (max(height(root.left ), height(root.right ) ) + 1) if root else 0
def level_order( root ) -> Sequence[Node | None]:
    output = []
    if root is None:
        return output
    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data )
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output
def get_nodes_from_left_to_right( root, level ) -> Sequence[Node | None]:
    output = []
    def populate_output( root, level ) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left, level - 1 )
            populate_output(root.right, level - 1 )
    populate_output(root, level )
    return output
def get_nodes_from_right_to_left( root, level ) -> Sequence[Node | None]:
    output = []
    def populate_output( root, level ) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.right, level - 1 )
            populate_output(root.left, level - 1 )
    populate_output(root, level )
    return output
def zigzag( root ) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []
    output = []
    flag = 0
    height_tree = height(root )
    for h in range(1, height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h ) )
            flag = 0
    return output
def main() -> None: # Main function for testing.
    root = make_tree()
    print(F'''In-order Traversal: {inorder(root )}''' )
    print(F'''Pre-order Traversal: {preorder(root )}''' )
    print(F'''Post-order Traversal: {postorder(root )}''', """\n""" )
    print(F'''Height of Tree: {height(root )}''', """\n""" )
    print("""Complete Level Order Traversal: """ )
    print(level_order(root ), """\n""" )
    print("""Level-wise order Traversal: """ )
    for level in range(1, height(root ) + 1 ):
        print(F'''Level {level}:''', get_nodes_from_left_to_right(root, level=level ) )
    print("""\nZigZag order Traversal: """ )
    print(zigzag(root ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
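# zigzag() alternates get_nodes_from_left_to_right and get_nodes_from_right_to_left per level via the flag toggle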
| 162
| 0
|
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
                ], # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
                ], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
__SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
__SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non filtered values as noted above
__SCREAMING_SNAKE_CASE = tf_top_k_top_p_filtering(__SCREAMING_SNAKE_CASE , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
__SCREAMING_SNAKE_CASE = output[output != -float("""inf""" )]
__SCREAMING_SNAKE_CASE = tf.cast(
tf.where(tf.not_equal(__SCREAMING_SNAKE_CASE , tf.constant(-float("""inf""" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , rtol=1E-12 )
tf.debugging.assert_equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
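        # after top-k/top-p filtering only the expected logits stay finite; both their values and positions are checked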
@require_tf
class lowerCAmelCase__ ( unittest.TestCase , a ):
"""simple docstring"""
if is_tf_available():
lowerCAmelCase__ = {
"AutoModelForCausalLM": TFAutoModelForCausalLM,
"AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
"AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
"AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
"LogitsProcessorList": TFLogitsProcessorList,
"MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
"create_tensor_fn": tf.convert_to_tensor,
"floats_tensor": floats_tensor,
"return_tensors": "tf",
}
@slow
def UpperCAmelCase__ ( self : List[str] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = 2
class lowerCAmelCase__ ( tf.Module ):
"""simple docstring"""
def __init__( self : str , __SCREAMING_SNAKE_CASE : Tuple ) -> List[str]:
"""simple docstring"""
super(__SCREAMING_SNAKE_CASE , self ).__init__()
__SCREAMING_SNAKE_CASE = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=__SCREAMING_SNAKE_CASE , )
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model.generate(
input_ids=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , max_new_tokens=__SCREAMING_SNAKE_CASE , return_dict_in_generate=__SCREAMING_SNAKE_CASE , )
return {"sequences": outputs["sequences"]}
__SCREAMING_SNAKE_CASE = [[2, 0], [102, 103]]
__SCREAMING_SNAKE_CASE = [[1, 0], [1, 1]]
__SCREAMING_SNAKE_CASE = DummyModel(model=__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , signatures={"""serving_default""": dummy_model.serving} )
__SCREAMING_SNAKE_CASE = tf.saved_model.load(__SCREAMING_SNAKE_CASE ).signatures["""serving_default"""]
for batch_size in range(1 , len(__SCREAMING_SNAKE_CASE ) + 1 ):
__SCREAMING_SNAKE_CASE = {
"""input_ids""": tf.constant(dummy_input_ids[:batch_size] ),
"""attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ),
}
__SCREAMING_SNAKE_CASE = serving_func(**__SCREAMING_SNAKE_CASE )["""sequences"""]
__SCREAMING_SNAKE_CASE = test_model.generate(**__SCREAMING_SNAKE_CASE , max_new_tokens=__SCREAMING_SNAKE_CASE )
tf.debugging.assert_equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 2
class lowerCAmelCase__ ( tf.Module ):
"""simple docstring"""
def __init__( self : str , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
super(__SCREAMING_SNAKE_CASE , self ).__init__()
__SCREAMING_SNAKE_CASE = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((batch_size, None) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=__SCREAMING_SNAKE_CASE , )
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model.generate(
input_ids=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , max_new_tokens=__SCREAMING_SNAKE_CASE , return_dict_in_generate=__SCREAMING_SNAKE_CASE , )
return {"sequences": outputs["sequences"]}
__SCREAMING_SNAKE_CASE = [[2], [102, 103]]
__SCREAMING_SNAKE_CASE = [[1], [1, 1]]
__SCREAMING_SNAKE_CASE = DummyModel(model=__SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , signatures={"""serving_default""": dummy_model.serving} )
__SCREAMING_SNAKE_CASE = tf.saved_model.load(__SCREAMING_SNAKE_CASE ).signatures["""serving_default"""]
for input_row in range(len(__SCREAMING_SNAKE_CASE ) ):
__SCREAMING_SNAKE_CASE = {
"""input_ids""": tf.constant([dummy_input_ids[input_row]] ),
"""attention_mask""": tf.constant([dummy_attention_masks[input_row]] ),
}
__SCREAMING_SNAKE_CASE = serving_func(**__SCREAMING_SNAKE_CASE )["""sequences"""]
__SCREAMING_SNAKE_CASE = test_model.generate(**__SCREAMING_SNAKE_CASE , max_new_tokens=__SCREAMING_SNAKE_CASE )
tf.debugging.assert_equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
@require_tensorflow_text
def UpperCAmelCase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=__SCREAMING_SNAKE_CASE )
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Dict ) -> List[Any]:
"""simple docstring"""
super().__init__()
__SCREAMING_SNAKE_CASE = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(__SCREAMING_SNAKE_CASE , """spiece.model""" ) , """rb""" ).read() )
__SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , *__SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = text.pad_model_inputs(
__SCREAMING_SNAKE_CASE , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
__SCREAMING_SNAKE_CASE = self.model.generate(input_ids=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
return self.tokenizer.detokenize(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = CompleteSentenceTransformer()
__SCREAMING_SNAKE_CASE = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""" )
__SCREAMING_SNAKE_CASE = complete_model(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = tf.keras.Model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
keras_model.save(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[str] ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {
"""do_sample""": True,
"""num_beams""": 1,
"""top_p""": 0.7,
"""top_k""": 10,
"""temperature""": 0.7,
}
__SCREAMING_SNAKE_CASE = 14
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__SCREAMING_SNAKE_CASE = """Hello, my dog is cute and"""
__SCREAMING_SNAKE_CASE = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors="""tf""" )
__SCREAMING_SNAKE_CASE = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
__SCREAMING_SNAKE_CASE = 638
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
__SCREAMING_SNAKE_CASE = model.generate(**__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
self.assertTrue(expectation == len(generated_tokens[0] ) )
__SCREAMING_SNAKE_CASE = [638, 198]
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
__SCREAMING_SNAKE_CASE = model.generate(**__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
self.assertTrue(expectation == len(generated_tokens[0] ) )
    def test_model_kwarg_encoder_signature_filtering(self):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder
        # Normal generation still works (the output will be different because the encoder weights are different)
        output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
| 331
|
'''simple docstring'''
def pancake_sort(arr):
    """Sorts `arr` by repeatedly flipping the prefix that contains the current maximum."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi, bringing the maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the first cur elements, sinking the maximum into place
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
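# Quick sanity check (illustrative, not part of the original script): each pass
# flips the prefix max to the front, then flips it into its final slot.
# pancake_sort([3, 2, 1, 5]) == [1, 2, 3, 5]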
| 331
| 1
|
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x):
    """Splits text so that each sentence sits on its own line (useful for ROUGE-Lsum scoring)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
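# Example of the intended behaviour (requires nltk's "punkt" data, downloaded above):
# add_newline_to_end_of_each_sentence("Hello world. How are you?")
# -> "Hello world.\nHow are you?"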
| 88
|
"""simple docstring"""
def solution(n=1_000):
    """Sums the maximum square remainders r_max = 2 * a * ((a - 1) // 2) for 3 <= a <= n (Project Euler 120)."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))


if __name__ == "__main__":
    print(solution())
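# Worked example (hand-checked): solution(10) sums 2*a*((a - 1) // 2) for a = 3..10:
# 6 + 8 + 20 + 24 + 42 + 48 + 72 + 80 = 300, so solution(10) == 300.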
| 153
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechTaProcessor
    model_class = SpeechTaForTextToSpeech
    post_processor_class = SpeechTaHifiGan

    inputs = ["text"]
    outputs = ["audio"]
    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")
            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
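# Minimal usage sketch (illustrative; the PipelineTool base class makes instances callable):
# tool = TextToSpeechTool()
# waveform = tool("Hello world")  # a torch tensor that can be written out as a .wav file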
| 365
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text):
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _decode(
        self,
        token_ids,
        skip_special_tokens=False,
        clean_up_tokenization_spaces=None,
        spaces_between_special_tokens=True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
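# Usage sketch (requires a real SentencePiece model file on disk):
# tokenizer = XLNetTokenizer("spiece.model")
# ids = tokenizer.encode("Hello world")  # encode() comes from the PreTrainedTokenizer base class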
| 341
| 0
|
'''simple docstring'''
from __future__ import annotations
import bisect
def bisect_left(sorted_collection, item, lo=0, hi=-1):
    """Locates the first slot where `item` can be inserted while keeping order."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection, item, lo=0, hi=-1):
    """Locates the slot just past any existing occurrences of `item`."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection, item, lo=0, hi=-1):
    """Inserts `item` before any existing occurrences."""
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection, item, lo=0, hi=-1):
    """Inserts `item` after any existing occurrences."""
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)
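# Quick hand-check of the two variants (illustrative):
# bisect_left([1, 2, 4, 4, 8], 4) == 2   (first slot where 4 could go)
# bisect_right([1, 2, 4, 4, 8], 4) == 4  (slot just past the existing 4s)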
def binary_search(sorted_collection, item):
    """Iterative binary search; returns the index of `item` or None."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection, item):
    """Same search expressed via the standard library's bisect module."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection, item, left, right):
    """Recursive binary search over sorted_collection[left:right + 1]."""
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
| 323
|
'''simple docstring'''
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build streaming dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
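# Usage sketch (hypothetical file path):
# reader = TextDatasetReader("corpus.txt", split=NamedSplit("train"))
# dataset = reader.read()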
| 323
| 1
|
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image


def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blipa_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    config = BlipaConfig(vision_config=vision_config, text_config=text_config)
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original model's weights into the transformers design."""
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blipa_config(model_name, eos_token_id=eos_token_id)
    hf_model = BlipaForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }
    name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = BlipaProcessor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Name of the model to convert (maps to an original LAVIS checkpoint)",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    args = parser.parse_args()

    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
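# Example invocation (the script filename is a placeholder; pass any model name from `choices`):
# python convert_blip_2_original_to_pytorch.py --model_name blip2-opt-2.7b --pytorch_dump_folder_path ./blip2-opt-2.7b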
| 55
|
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
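# Example invocation (assuming this file is saved as xla_spawn.py; paths are placeholders):
# python xla_spawn.py --num_cores 8 your_training_script.py --arg1 --arg2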
| 55
| 1
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length=None,
        context_length=None,
        distribution_output="student_t",
        loss="nll",
        input_size=1,
        lags_sequence=[1, 2, 3, 4, 5, 6, 7],
        scaling="mean",
        num_dynamic_real_features=0,
        num_static_categorical_features=0,
        num_static_real_features=0,
        num_time_features=0,
        cardinality=None,
        embedding_dimension=None,
        encoder_ffn_dim=32,
        decoder_ffn_dim=32,
        encoder_attention_heads=2,
        decoder_attention_heads=2,
        encoder_layers=2,
        decoder_layers=2,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=64,
        dropout=0.1,
        encoder_layerdrop=0.1,
        decoder_layerdrop=0.1,
        attention_dropout=0.1,
        activation_dropout=0.1,
        num_parallel_samples=100,
        init_std=0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
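# Usage sketch (illustrative values; `feature_size` is derived in __init__ as
# input_size * len(lags_sequence) plus the feature count computed in the property above):
# config = TimeSeriesTransformerConfig(prediction_length=24, context_length=48, num_time_features=2)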
| 15
|
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
    def __init__(
        self,
        vocab_size=250002,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        initializer_range=0.02,
        pad_token_id=1,
        layer_norm_eps=1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
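# Usage sketch (defaults shown above; any argument can be overridden):
# config = ErnieMConfig(vocab_size=250002, hidden_size=768)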
| 345
| 0
|
"""simple docstring"""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Saves the max(src_len, tgt_len) of each train/val example to the datasets' len files."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
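# Example invocation via fire (tokenizer name and data directory are placeholders):
# python save_len_file.py facebook/bart-base ./cnn_dm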
| 363
|
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('After swapping')
ll.print_list()
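# Note: swap_nodes exchanges the payloads of the two nodes rather than relinking
# them, so it runs in O(n) with no pointer surgery. Illustrative run: the list
# prints 1 2 3 4 5, and becomes 4 2 3 1 5 after ll.swap_nodes(1, 4).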
| 69
| 0
|
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether the top_k_top_p filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
[
[
8.222_0991, # 3rd highest value; idx. 0
-0.562_0044,
5.2322_9752,
4.038_6393,
-6.879_8378,
-0.5478_5802,
-3.201_2153,
2.9277_7176,
1.8817_1953,
7.3534_1276, # 5th highest value; idx. 9
8.4320_7833, # 2nd highest value; idx. 10
-9.8571_1836,
-5.9620_9236,
-1.1303_9161,
-7.111_5294,
-0.836_9633,
-5.318_6408,
7.0642_7407,
0.8136_9344,
-0.8202_3817,
-5.917_9796,
0.5881_3443,
-6.9977_8438,
4.7155_1189,
-0.1877_1637,
7.4402_0759, # 4th highest value; idx. 25
9.3845_0987, # 1st highest value; idx. 26
2.1266_2941,
-9.3256_2038,
2.3565_2522,
], # cummulative prob of 5 highest values <= 0.6
[
0.5842_5518,
4.5313_9238,
-5.5751_0464,
-6.2803_0699,
-7.1952_9503,
-4.0212_2551,
1.3933_7037,
-6.0670_7057,
1.5948_0517,
-9.64_3119,
0.0390_7799,
0.6723_1762,
-8.8820_6726,
6.2711_5922, # 4th highest value; idx. 13
2.2852_0723,
4.8276_7506,
4.3042_1368,
8.827_5313, # 2nd highest value; idx. 17
5.4402_9958, # 5th highest value; idx. 18
-4.473_5794,
7.3857_9536, # 3rd highest value; idx. 20
-2.9105_1663,
2.6194_6077,
-2.567_4762,
-9.4895_9302,
-4.0292_2645,
-1.3541_6918,
9.6770_2323, # 1st highest value; idx. 27
-5.8947_8553,
1.8537_0467,
], # cummulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )
        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]], dtype=tf.int32
        )  # expected non filtered idx as noted above
        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above
        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)
        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )
        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    # setting framework_dependent_parameters needs to be gated, just like its contents' imports
    if is_tf_available():
        framework_dependent_parameters = {
"""AutoModelForCausalLM""": TFAutoModelForCausalLM,
"""AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeqaSeq,
"""AutoModelForSeq2SeqLM""": TFAutoModelForSeqaSeqLM,
"""AutoModelForVision2Seq""": TFAutoModelForVisionaSeq,
"""LogitsProcessorList""": TFLogitsProcessorList,
"""MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor,
"""create_tensor_fn""": tf.convert_to_tensor,
"""floats_tensor""": floats_tensor,
"""return_tensors""": """tf""",
}
@slow
    def test_generate_tf_function_export_fixed_input_length(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
@slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
@slow
@require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        sentence = "Hello, my dog is cute and"
        tokenized = tokenizer(sentence, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokenized, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokenized, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering(self):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder
        # Normal generation still works (the output will be different because the encoder weights are different)
        output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
| 331
|
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)
        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)
        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_string_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))
    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)
    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
def __lowerCAmelCase ( self : Tuple ) -> Dict:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
__magic_name__ : Any = Elasticsearch()
__magic_name__ : Union[str, Any] = {'acknowledged': True}
__magic_name__ : Tuple = ElasticSearchIndex(es_client=_A )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
__magic_name__ : str = 'foo'
__magic_name__ : str = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__magic_name__ , __magic_name__ : Dict = index.search(_A )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
__magic_name__ : str = 'foo'
__magic_name__ : Dict = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__magic_name__ , __magic_name__ : Dict = index.search(_A , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
__magic_name__ : Optional[Any] = ['foo', 'bar', 'foobar']
__magic_name__ : Optional[Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__magic_name__ , __magic_name__ : Optional[Any] = index.search_batch(_A )
__magic_name__ : Tuple = [scores[0] for scores in total_scores]
__magic_name__ : List[str] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_A ) , 0 )
self.assertListEqual([1, 1, 1] , _A )
# batched queries with timeout
__magic_name__ : Union[str, Any] = ['foo', 'bar', 'foobar']
__magic_name__ : Tuple = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__magic_name__ , __magic_name__ : Dict = index.search_batch(_A , request_timeout=30 )
__magic_name__ : Optional[int] = [scores[0] for scores in total_scores]
__magic_name__ : Union[str, Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_A ) , 0 )
self.assertListEqual([1, 1, 1] , _A )
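
# The bulk-indexing line above leans on a `unittest.mock` detail that is easy to get
# backwards: `return_value` must be assigned, not called. A minimal sketch of the difference:
from unittest.mock import MagicMock

mocked = MagicMock()
mocked.return_value = [(True, None)] * 3  # configures what the mock returns when called
assert mocked() == [(True, None), (True, None), (True, None)]
# Calling mocked.return_value(...) instead would invoke an auto-created child mock
# and configure nothing, silently breaking the test.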
from unittest import TestCase

from datasets import Dataset

from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def get_dataset() -> Dataset:
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
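
# These tests target a project-local `minhash_deduplication` module. For intuition only, a
# minimal MinHash similarity sketch using the third-party `datasketch` package (an assumption
# for illustration, not necessarily what the module above uses):
from datasketch import MinHash


def minhash(text: str, num_perm: int = 128) -> MinHash:
    m = MinHash(num_perm=num_perm)
    for token in text.split():
        m.update(token.encode("utf8"))
    return m


# Near-duplicate contents score ~1.0 Jaccard similarity, unrelated ones near 0.0 -
# the signal a threshold such as the 0.85 above acts on.
print(minhash("a " * 20).jaccard(minhash("a " * 30)))  # ~1.0
print(minhash("a " * 20).jaccard(minhash("b " * 7)))   # ~0.0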
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState


warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")


class AcceleratedScheduler:
    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
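
# In practice this wrapper is not instantiated by hand: `Accelerator.prepare` substitutes it
# for any scheduler passed in. A minimal, self-contained sketch of the intended usage:
import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)

# prepare() returns an AcceleratedScheduler in place of the raw scheduler
model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)

loss = model(torch.randn(8, 4)).sum()
accelerator.backward(loss)
optimizer.step()
scheduler.step()  # only advances the LR when the optimizer step was not skipped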
"""simple docstring"""
def _A ( lowercase = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
a =set()
# Replace all the whitespace in our sentence
a =input_str.replace(''' ''' , '''''' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(lowercase ) == 26
def _A ( lowercase = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
a =[False] * 26
for char in input_str:
if char.islower():
a =True
elif char.isupper():
a =True
return all(lowercase )
def _A ( lowercase = "The quick brown fox jumps over the lazy dog" , ):
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def _A ( ):
"""simple docstring"""
from timeit import timeit
a ='''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'''
print(timeit('''is_pangram()''' , setup=lowercase ) )
print(timeit('''is_pangram_faster()''' , setup=lowercase ) )
print(timeit('''is_pangram_fastest()''' , setup=lowercase ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
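
# As a quick sanity check, the three implementations should always agree; a small sketch:
for sentence in [
    "The quick brown fox jumps over the lazy dog",  # pangram
    "The quick brown fox jumps over the lazy cat",  # missing 'd' and 'g'
    "",  # trivially not a pangram
]:
    results = {is_pangram(sentence), is_pangram_faster(sentence), is_pangram_fastest(sentence)}
    assert len(results) == 1, f"implementations disagree on {sentence!r}"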
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    """Utility class containing a conversation and its history."""

    def __init__(self, text: str = None, conversation_id=None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    """Multi-turn conversational pipeline."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> List[int]:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
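
# A minimal usage sketch (downloads a small checkpoint on first call; the model choice here
# is only an example):
from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
conversation = Conversation("Hi, how are you?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])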
import warnings
from functools import wraps
from typing import Callable


def experimental(fn: Callable) -> Callable:
    """Decorator flagging a function as experimental API."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
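
# Usage sketch: the decorated function behaves normally but warns on every call.
import warnings


@experimental
def new_feature() -> int:
    return 42


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    assert new_feature() == 42
    assert "experimental" in str(caught[0].message)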
from collections import deque


class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()

    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue: deque[Process], time_slice: int):
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue


if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes(P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:{mlfq.calculate_sequence_of_finish_queue()}")
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer


VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}


class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
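
# A quick usage sketch (fetches the checkpoint from the Hub on first call):
from transformers import LxmertTokenizerFast

tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
encoded = tokenizer("Who is eating the apple?", "A boy in the park.")
# Sentence pairs come back as [CLS] A [SEP] B [SEP], with token_type_ids 0/1 per segment.
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))
print(encoded["token_type_ids"])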
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    """Project Euler 33: denominator of the product of the digit-cancelling
    fractions, given in lowest common terms."""
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
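
# A concrete instance of the predicate: 49/98 is the canonical digit-cancelling fraction,
# since "cancelling" the shared 9 gives 4/8 and 49/98 == 4/8 == 1/2.
assert is_digit_cancelling(49, 98)
assert not is_digit_cancelling(12, 34)  # shares no cancellable digit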
def is_pentagonal(n: int) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5_000) -> int:
    """Find the pair of pentagonal numbers whose sum and difference are both
    pentagonal, and return their (minimal) difference."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
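
# `is_pentagonal` inverts P(n) = n(3n - 1) / 2: solving the quadratic for n gives
# n = (1 + sqrt(1 + 24P)) / 6, so P is pentagonal exactly when that value is a whole number.
assert [n for n in range(1, 40) if is_pentagonal(n)] == [1, 5, 12, 22, 35]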
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        """Return the default number of inference steps for the configured scheduler."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
@torch.no_grad()
def __call__( self , lowerCAmelCase__ = 1 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = 0 , lowerCAmelCase__ = 0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = 0 , lowerCAmelCase__ = 0 , lowerCAmelCase__ = None , lowerCAmelCase__ = 0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
'''simple docstring'''
a__ : List[Any] =steps or self.get_default_steps()
self.scheduler.set_timesteps(lowerCAmelCase__ )
a__ : Tuple =step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
a__ : List[str] =(self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
a__ : Optional[Any] =randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=lowerCAmelCase__ , device=self.device , )
a__ : List[str] =noise
a__ : Optional[Any] =None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(lowerCAmelCase__ , lowerCAmelCase__ )
a__ : Tuple =self.mel.audio_slice_to_image(lowerCAmelCase__ )
a__ : List[Any] =np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape(
(input_image.height, input_image.width) )
a__ : Optional[Any] =(input_image / 2_5_5) * 2 - 1
a__ : Dict =torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
a__ : str =self.vqvae.encode(torch.unsqueeze(lowerCAmelCase__ , 0 ) ).latent_dist.sample(
generator=lowerCAmelCase__ )[0]
a__ : Any =self.vqvae.config.scaling_factor * input_images
if start_step > 0:
a__ : Optional[int] =self.scheduler.add_noise(lowerCAmelCase__ , lowerCAmelCase__ , self.scheduler.timesteps[start_step - 1] )
a__ : Tuple =(
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
a__ : Union[str, Any] =int(mask_start_secs * pixels_per_second )
a__ : List[str] =int(mask_end_secs * pixels_per_second )
a__ : Optional[Any] =self.scheduler.add_noise(lowerCAmelCase__ , lowerCAmelCase__ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , lowerCAmelCase__ ):
a__ : List[str] =self.unet(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )["sample"]
else:
a__ : Optional[Any] =self.unet(lowerCAmelCase__ , lowerCAmelCase__ )["sample"]
if isinstance(self.scheduler , lowerCAmelCase__ ):
a__ : int =self.scheduler.step(
model_output=lowerCAmelCase__ , timestep=lowerCAmelCase__ , sample=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , )["prev_sample"]
else:
a__ : str =self.scheduler.step(
model_output=lowerCAmelCase__ , timestep=lowerCAmelCase__ , sample=lowerCAmelCase__ , generator=lowerCAmelCase__ , )["prev_sample"]
if mask is not None:
if mask_start > 0:
a__ : List[Any] =mask[:, step, :, :mask_start]
if mask_end > 0:
a__ : Union[str, Any] =mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
a__ : Any =1 / self.vqvae.config.scaling_factor * images
a__ : str =self.vqvae.decode(lowerCAmelCase__ )["sample"]
a__ : str =(images / 2 + 0.5).clamp(0 , 1 )
a__ : int =images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
a__ : List[Any] =(images * 2_5_5).round().astype("uint8" )
a__ : Dict =list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(lowerCAmelCase__ , mode="RGB" ).convert("L" ) for _ in images) )
a__ : str =[self.mel.image_to_audio(lowerCAmelCase__ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(lowerCAmelCase__ )[:, np.newaxis, :] ) , **ImagePipelineOutput(lowerCAmelCase__ ) )
@torch.no_grad()
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = 5_0 ) -> np.ndarray:
'''simple docstring'''
assert isinstance(self.scheduler , lowerCAmelCase__ )
self.scheduler.set_timesteps(lowerCAmelCase__ )
a__ : Union[str, Any] =np.array(
[np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] )
a__ : Tuple =(sample / 2_5_5) * 2 - 1
a__ : List[Any] =torch.Tensor(lowerCAmelCase__ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
a__ : str =t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
a__ : Dict =self.scheduler.alphas_cumprod[t]
a__ : Optional[Any] =(
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
a__ : Optional[Any] =1 - alpha_prod_t
a__ : str =self.unet(lowerCAmelCase__ , lowerCAmelCase__ )["sample"]
a__ : Optional[Any] =(1 - alpha_prod_t_prev) ** 0.5 * model_output
a__ : List[str] =(sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
a__ : Optional[Any] =sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical Linear intERPolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
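
# `slerp` interpolates along the great circle between two latents instead of a straight
# line, which preserves the norm that Gaussian noise is expected to have. A standalone
# sketch of the same formula:
import torch
from math import acos, sin


def slerp_demo(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
    return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)


x0, x1 = torch.tensor([1.0, 0.0]), torch.tensor([0.0, 1.0])
print(slerp_demo(x0, x1, 0.5))  # tensor([0.7071, 0.7071]): halfway along the quarter circle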
"""simple docstring"""
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"""python""",
"""tqdm""",
"""regex""",
"""requests""",
"""packaging""",
"""filelock""",
"""numpy""",
"""tokenizers""",
"""huggingface-hub""",
"""safetensors""",
"""accelerate""",
"""pyyaml""",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
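
# Callers elsewhere in the library use the helper like so; it raises if the installed
# package does not satisfy the pin recorded in `deps`:
from transformers.dependency_versions_check import dep_version_check

dep_version_check("numpy")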
"""simple docstring"""
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
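
# To see what the checkpoint regex extracts from a config docstring:
example = "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
print(_re_checkpoint.findall(example))
# [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]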
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    """Output class for Stable Diffusion pipelines."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_cycle_diffusion import CycleDiffusionPipeline
    from .pipeline_stable_diffusion import StableDiffusionPipeline
    from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
    from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
    from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
    from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
    from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
    from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
    from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
    from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
    from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
    from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
    from .safety_checker import StableDiffusionSafetyChecker
    from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer

try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
    from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline

try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline

try:
    if not (
        is_torch_available()
        and is_transformers_available()
        and is_k_diffusion_available()
        and is_k_diffusion_version(">=", "0.0.12")
    ):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import *  # noqa F403
else:
    from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline

try:
    if not (is_transformers_available() and is_onnx_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_onnx_objects import *  # noqa F403
else:
    from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
    from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
    from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
    from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline

if is_transformers_available() and is_flax_available():
    import flax

    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        """Output class for Flax-based Stable Diffusion pipelines."""

        images: np.ndarray
        nsfw_content_detected: List[bool]

    from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
    from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
    from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
    from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text), expected)

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
import qiskit


def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1_000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
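
# Over the four possible inputs the circuit should reproduce the half-adder truth table.
# Qiskit orders bitstrings as classical bit 1 (AND/carry) then bit 0 (XOR/sum), so the
# expected dominant counts are '00', '01', '01' and '10' respectively:
for a, b in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    print(a, b, half_adder(a, b))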
def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute force: check every pair among the first `points_counts` points."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Within the strip, each point only needs to be compared to its 6 neighbours."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case: fall back to brute force on small inputs
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts
        )
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
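
# A minimal usage sketch (editor's addition): `model`, `training_args`, the datasets
# and `post_processing_function` are placeholders that an accompanying run_qa.py
# script would supply; they are not defined in this file.
#
#     trainer = QuestionAnsweringTrainer(
#         model=model,
#         args=training_args,
#         train_dataset=train_dataset,
#         eval_dataset=eval_dataset,
#         eval_examples=eval_examples,
#         post_process_function=post_processing_function,
#         compute_metrics=compute_metrics,
#     )
#     metrics = trainer.evaluate(metric_key_prefix="eval")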
| 226
|
"""simple docstring"""
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    """Recursively draw the Sierpinski triangle down to the given depth."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
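    # Editor's example: get_mid is a pure function, so it can be sanity-checked
    # without the turtle canvas.
    assert get_mid((0, 0), (10, 6)) == (5.0, 3.0)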
| 226
| 1
|
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
    from nltk import word_tokenize


_CITATION = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    \'meteor\': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric(\'meteor\')\n    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results["meteor"], 4))\n    0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'],
            reference_urls=[
                'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
                'https://en.wikipedia.org/wiki/METEOR',
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download('wordnet')
        if NLTK_VERSION >= version.Version('3.6.5'):
            nltk.download('punkt')
        if NLTK_VERSION >= version.Version('3.6.6'):
            nltk.download('omw-1.4')

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version('3.6.5'):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 329
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.')
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
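
# A minimal usage sketch (editor's addition; loading the checkpoint needs network
# access and a `waveform` array, so this is illustrative rather than executable here):
#
#     processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#     inputs = processor(audio=waveform, sampling_rate=16000, return_tensors="pt")
#     labels = processor(text="a transcript").input_ids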
| 129
| 0
|
'''simple docstring'''
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of ``n``."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(n: int = 10000) -> int:
    """Sum all amicable numbers below ``n``."""
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
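    # Editor's worked example: 220 and 284 form the smallest amicable pair, so
    # sum_of_divisors maps each one onto the other.
    assert sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220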
| 362
|
'''simple docstring'''
def gnome_sort(lst: list) -> list:
    """Sort ``lst`` in place with gnome sort and return it."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
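    # Editor's examples: already-sorted input is returned unchanged and reversed
    # input is fully sorted.
    assert gnome_sort([1, 2, 3]) == [1, 2, 3]
    assert gnome_sort([3, 2, 1]) == [1, 2, 3]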
| 8
| 0
|
"""simple docstring"""
def all_unique_chars(input_str: str) -> bool:
    """Return True if no character occurs more than once in ``input_str``."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
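    # Editor's examples: "abc" has no repeated character, while "aba" repeats 'a'.
    assert all_unique_chars("abc") is True
    assert all_unique_chars("aba") is False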
| 148
|
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a Mask2FormerConfig from a pre-trained backbone configuration."""
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        """Serialize this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
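
# A minimal usage sketch (editor's addition): with no arguments the config falls
# back to the default Swin backbone, and to_dict() serializes it with the model type.
#
#     config = Mask2FormerConfig()
#     assert config.backbone_config.model_type == "swin"
#     assert config.to_dict()["model_type"] == "mask2former"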
| 148
| 1
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random nested list of floats with the given shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
| 360
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
| 83
| 0
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    'configuration_owlvit': [
        'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'OwlViTConfig',
        'OwlViTOnnxConfig',
        'OwlViTTextConfig',
        'OwlViTVisionConfig',
    ],
    'processing_owlvit': ['OwlViTProcessor'],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_owlvit'] = ['OwlViTFeatureExtractor']
    _import_structure['image_processing_owlvit'] = ['OwlViTImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_owlvit'] = [
        'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'OwlViTModel',
        'OwlViTPreTrainedModel',
        'OwlViTTextModel',
        'OwlViTVisionModel',
        'OwlViTForObjectDetection',
    ]

if TYPE_CHECKING:
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 193
|
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """Initialize with the number of items in each set and rank 1 per set."""
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets using union by rank; return True if a merge happened."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the representative of a set, compressing the path on the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
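

if __name__ == "__main__":
    # Editor's example: three singleton sets; merging 0 and 1 grows the largest
    # set to 2, and merging the same pair again is a no-op that returns False.
    disjoint_set = DisjointSet([1, 1, 1])
    assert disjoint_set.merge(0, 1) is True
    assert disjoint_set.merge(1, 0) is False
    assert disjoint_set.max_set == 2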
| 193
| 1
|
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
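
# Editor's example: with the 24 kHz defaults the decoder hop length is
# 8 * 5 * 4 * 2 = 320 samples, so frame_rate evaluates to ceil(24000 / 320) = 75.
#
#     config = EncodecConfig()
#     assert config.frame_rate == 75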
| 350
|
import os
def solution(filename: str = "input.txt") -> int:
    """Project Euler 82: minimal path sum moving up, down, or right, from any
    cell in the left column to any cell in the right column."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        # first, pay to come in from the left
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        # then relax downward and upward movements within the column
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
| 42
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 226
|
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
__A =argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
__A , __A =parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
__A =rh.cluster(
name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
)
else:
__A =rh.cluster(
name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
__A =args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 226
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/vivit-b-16x2-kinetics400': (
        'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self, image_size=224, num_frames=32, tubelet_size=[2, 16, 16], num_channels=3,
        hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu_fast", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
        initializer_range=0.02, layer_norm_eps=1e-06, qkv_bias=True, **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
| 356
|
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    """Multiply the non-trivial digit-cancelling fractions together and return
    the denominator of the product in lowest terms."""
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
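    # Editor's worked example: 49/98 "cancels" the shared 9 to give 4/8 = 1/2,
    # which equals the original value, so it is one of the four fractions counted.
    assert is_digit_cancelling(49, 98)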
| 98
| 0
|
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
    "sample_size": 32,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": 1000,
    "block_out_channels": [32, 64],
    "attention_head_dim": 8,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

IMAGENET_64_UNET_CONFIG = {
    "sample_size": 64,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 3,
    "num_class_embeds": 1000,
    "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

LSUN_256_UNET_CONFIG = {
    "sample_size": 256,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": None,
    "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "default",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
def strabool(v):
    """Parse a string as a boolean (argparse helper)."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path: str, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")
    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
    if not args.class_cond:
        unet_config["num_class_embeds"] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNetaDModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
| 57
|
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[1_7, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_0_0_0, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[1_7, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_0_0_0, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)
        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
| 8
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
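def _demo_token_type_ids():
    # Hedged sketch, not part of the original file: mirrors the segment-id layout
    # produced by create_token_type_ids_from_sequences above for a toy sentence
    # pair (the token ids below are made up for illustration).
    cls, sep = [2], [3]
    token_ids_0, token_ids_1 = [10, 11], [20]
    type_ids = len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    assert type_ids == [0, 0, 0, 0, 1, 1]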
| 351
|
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
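def _demo_key_remap():
    # Hedged sketch, not part of the original script: the dict comprehension in
    # summarize_book only renames keys while copying the corresponding values.
    desired_keys = {"title": "Title"}
    raw = {"title": "Matilda", "revision": 4}
    assert {better: raw[key] for key, better in desired_keys.items()} == {"Title": "Matilda"}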
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
lowerCamelCase__ : Optional[Any] = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
lowerCamelCase__ : Union[str, Any] = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
print('\n'.join(f'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'''Sorry, there are no results for ISBN: {isbn}.''')
| 210
| 0
|
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class _a ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999_999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 251
|
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
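def _demo_attribute_map():
    # Hedged sketch, not part of the original file: attribute_map aliases
    # hidden_size -> d_model and num_attention_heads -> encoder_attention_heads,
    # so generic model code can read either name.
    config = MvpConfig(d_model=512, encoder_attention_heads=8)
    assert config.hidden_size == 512
    assert config.num_attention_heads == 8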
| 83
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''unc-nlp/lxmert-base-uncased''': (
'''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''unc-nlp/lxmert-base-uncased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 19
|
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 19
| 1
|
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list):
    if not elements_list:
        raise Exception("The Elements List is empty")
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node):
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)
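def _demo_print_reverse():
    # Hedged sketch, not part of the original file: print_reverse recurses to the
    # tail before printing, so the list 1->2 prints 2 first, then 1.
    print_reverse(make_linked_list([1, 2]))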
if __name__ == "__main__":
main()
| 21
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCAmelCase ( unittest.TestCase ):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids
        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
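def _demo_mtf_score():
    # Hedged sketch, not part of the original test: the mesh-tensorflow style
    # score checked above is the negated total NLL, recovered from the mean
    # per-token loss by multiplying with the label length.
    seq_len, mean_loss = 4, 2.0
    assert -(seq_len * mean_loss) == -8.0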
| 42
| 0
|
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
_DESCRIPTION = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
_KWARGS_DESCRIPTION = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthews Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
    def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
| 371
| 0
|
def merge_sort(collection):
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
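def _demo_min_max_sort():
    # Hedged sketch, not part of the original file: each pass of the loop above
    # peels the current minimum onto `start` and the current maximum onto `end`.
    assert merge_sort([3, 1, 2]) == [1, 2, 3]
    assert merge_sort([5, 5]) == [5, 5]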
if __name__ == "__main__":
UpperCamelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
UpperCamelCase__ = [int(item) for item in user_input.split(""",""")]
print(*merge_sort(unsorted), sep=""",""")
| 92
|
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
class ByT5Tokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits
        # define special tokens dict
        self.special_tokens_encoder = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder = {v: k for k, v in self.special_tokens_encoder.items()}
    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids):
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text):
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        return ()
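def _demo_byte_token_ids():
    # Hedged sketch, not part of the original file: byte ids are offset by the
    # number of special tokens (pad=0, eos=1, unk=2 above), so "h" (byte 104)
    # maps to id 107.
    num_special_tokens = 3
    assert [ord(c) + num_special_tokens for c in "hi"] == [107, 108]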
| 98
| 0
|
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"
_DESCRIPTION = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"
_KWARGS_DESCRIPTION = "\nCalculates how good predictions are given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidate should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n"
_WARNING = "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can do this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"
_LICENSE = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase (datasets.Metric ):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)
        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")
        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)
            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1
            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))
        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)
        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}
        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    def estimator(n, c, k) -> float:
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
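def _demo_pass_at_k():
    # Hedged sketch, not part of the original metric: with n=2 samples and c=1
    # correct, pass@1 = 1 - C(1, 1) / C(2, 1) = 0.5.
    assert float(estimate_pass_at_k(np.array([2]), np.array([1]), 1)[0]) == 0.5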
| 350
|
def count_inversions_bf(arr):
    """Counts inversions by brute force, comparing every pair, in O(n^2) time."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive(arr):
    """Counts inversions by divide and conquer in O(n log n) time.

    Returns the sorted array together with its inversion count.
    """
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions
def _count_cross_inversions(p, q):
    """Merges two sorted lists, counting the inversions that cross the split."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if p[i] > q[j], then p[k] > q[j] for all i <= k < len(p).
            # These are all inversions. The claim emerges from the
            # property that p is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion
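# Counting each batch of cross inversions in O(1) via `len(p) - i` is what makes
# the recursive version O(n log n); the brute-force version above inspects every
# pair and stays O(n^2).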
def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
| 162
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)
    def test_transpose(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))
    def test_reshape(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))
    def test_squeeze(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))
    def test_expand_dims(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 40
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # collect ids that decode to simple ASCII tokens and re-encode to themselves
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                continue
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])
    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
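    # Note on the expected ids above: ByT5 ids are raw UTF-8 byte values shifted by
    # the 3 special tokens (pad=0, eos=1, unk=2), e.g. "U" (byte 85) encodes to 88,
    # and the multi-byte "€" (bytes 226, 130, 172) becomes 229, 133, 175.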
    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == "")
    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(string, str)
    # We need a different implementation of the test of the same name defined in TokenizerTesterMixin
    # because this tokenizer doesn't have a vocabulary
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
| 210
| 0
|
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

        dset.drop_index("vecs")
    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))
    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)
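    # With METRIC_INNER_PRODUCT, larger scores mean closer matches, which is why
    # querying the reversed identity rows above ranks the top indices as [4, 3, 2, 1, 0].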
    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            index = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))
    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
| 363
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])
    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            new_state_dict["luke." + key] = state_dict[key]
        else:
            new_state_dict[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 249
| 0
|
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/bigbird-roberta-base''': 4_0_9_6,
'''google/bigbird-roberta-large''': 4_0_9_6,
'''google/bigbird-base-trivia-itc''': 4_0_9_6,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
def __init__( self , lowercase , lowercase="<unk>" , lowercase="<s>" , lowercase="</s>" , lowercase="<pad>" , lowercase="[SEP]" , lowercase="[MASK]" , lowercase="[CLS]" , lowercase = None , **lowercase , ) -> None:
lowerCamelCase_ = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else bos_token
lowerCamelCase_ = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else eos_token
lowerCamelCase_ = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else unk_token
lowerCamelCase_ = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else pad_token
lowerCamelCase_ = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else cls_token
lowerCamelCase_ = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else mask_token
lowerCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , pad_token=lowercase , sep_token=lowercase , mask_token=lowercase , cls_token=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , )
lowerCamelCase_ = vocab_file
lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase )
    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        """Takes a string as input and returns a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def _decode(self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True, **kwargs) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
| 19
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
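# The blocks below follow the standard lazy-import pattern: every public name is
# registered in _import_structure, and _LazyModule (at the bottom of the file)
# defers the actual submodule imports until an attribute is first accessed.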
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 19
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    # builds:      1
    #             / \
    #            2   3
    #           / \
    #          4   5
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
    """Pre-order traversal: visit root, then left subtree, then right subtree."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Post-order traversal: visit left subtree, then right subtree, then root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """In-order traversal: visit left subtree, then root, then right subtree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Height of the tree: number of nodes on the longest root-to-leaf path."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0
def level_order(root: Node | None) -> Sequence[Node | None]:
    """Returns node values from the whole binary tree, level by level."""
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)

        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
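# level_order above is a breadth-first traversal: the deque holds the current
# frontier, so values are emitted level by level, left to right, in O(n) total.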
def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    """Returns node values from a particular level, in left-to-right order."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output
def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    """Returns node values from a particular level, in right-to-left order."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """ZigZag traversal: node values left-to-right and right-to-left, alternately."""
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []

    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output
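# zigzag alternates direction per level by flipping `flag` and reusing the two
# single-level helpers above, so the full traversal costs O(n * h) for height h.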
def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")

    print(f"Height of Tree: {height(root)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")

    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(root))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 32
|
"""simple docstring"""
def perfect_cube(n: int) -> bool:
    """Return True if a non-negative integer n is a perfect cube."""
    val = round(n ** (1 / 3))
    return val * val * val == n
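# Example: perfect_cube(27) -> True, perfect_cube(4) -> False. Rounding matters:
# the raw float root 64 ** (1 / 3) is 3.9999999999999996, so comparing the cube
# of the unrounded value against n would wrongly reject 64.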
if __name__ == "__main__":
print(perfect_cube(2_7))
print(perfect_cube(4))
| 32
| 1
|
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"Salesforce/codegen-350M-mono": 2_048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text
    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))

        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))

        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0

        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
| 33
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
    import PIL
logger = logging.get_logger(__name__)


class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: int = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")

        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: int = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 114
| 0
|
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """Return the fixed monthly payment for a loan (EMI formula)."""
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
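# A hedged usage sketch (figures are illustrative, not from the original):
#   equated_monthly_installments(25000, 0.12, 3)  # ~830.36 per month over 36 payments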
| 370
|
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"


class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"

    @slow
    @require_torch_gpu
    def test_model_download(self):
        """Warm up the cache so the next test does not include download time."""
        MarianMTModel.from_pretrained(MARIAN_MODEL)

    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()

        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"""
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        """.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)

        # test learning requirements:

        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)

        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"], 17)

        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"][expected_key].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu

            model = distill_main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check

        assert last_step_stats["val_avg_gen_time"] >= 0.01

        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"][expected_key].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
| 208
| 0
|
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
    import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 63
|
'''simple docstring'''
def get_data(source_data: list) -> list[list[float]]:
    """Transpose the source data into per-attribute lists of floats."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max normalize each attribute; weight 0 inverts the score, weight 1 keeps it."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)

        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)

        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)

        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)

    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-attribute scores for each row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele

    return final_scores


def procentual_proximity(source_data: list, weights: list[int]) -> list[list[float]]:
    """Score each row of source_data and append its total score to the row."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)

    return source_data
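# A hedged usage sketch (values are illustrative, not from the original):
#   procentual_proximity([[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]], [0, 0, 1])
#   # -> [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333333333333335]]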
| 162
| 0
|
import pprint

import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
| 353
|
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """Convert a Roman numeral string to an integer."""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert an integer to a Roman numeral string."""
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
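# A hedged usage sketch (examples are illustrative, not from the original):
#   roman_to_int("XLII")   # -> 42
#   int_to_roman(42)       # -> "XLII"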
| 146
| 0
|
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTMSNForImageClassification, ViTMSNModel
    from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 95
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 249
| 0
|
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
    from ..models.auto import AutoTokenizer
class BaseStreamer:
    """Base class from which `.generate()` streamers should inherit."""

    def put(self, value):
        """Called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Receives tokens, decodes them, and prints them as soon as they form entire words."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flushes any remaining cache."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True

        return False
class TextIteratorStreamer(TextStreamer):
    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
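# A minimal usage sketch (my assumption, following the documented transformers
# pattern; `model`, `tokenizer`, and `inputs` are hypothetical placeholders):
#
#   from threading import Thread
#
#   streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
#   thread = Thread(target=model.generate, kwargs={**inputs, "streamer": streamer})
#   thread.start()
#   for new_text in streamer:
#       print(new_text, end="")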
| 357
|
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)

        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(
            predicted_ids, skip_special_tokens=True, normalize=True
        )[0]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
| 0
|