Dataset columns (types and value ranges as reported by the dataset viewer):

| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 – 53.2k chars) | int64 (0 – 721) | string (91 – 41.9k chars) | int64 (0 – 699) | int64 (0 – 1) |
from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    # Straight-line distance between two vectors of equal length.
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        # Start with the first dataset row as the current nearest neighbour.
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
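
A minimal usage sketch for the helpers above (the toy arrays are illustrative, not part of the source):

dataset = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [2.0, 2.0, 2.0]])
value_array = np.array([[0.0, 0.0, 0.1]])
print(similarity_search(dataset, value_array))  # [[[0.0, 0.0, 0.0], 0.1]]
print(cosine_similarity(np.array([1.0, 2.0]), np.array([6.0, 32.0])))  # ~0.9615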
(code_codestyle: 23)
from __future__ import annotations

import unittest

import numpy as np

from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_tf_available():
    import tensorflow as tf

    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel


def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}


@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)


def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size


@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))


@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))


@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"

        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
(style_context_codestyle: 261, label: 0)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
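
The `_LazyModule` registration above defers the heavy framework imports until a package attribute is first accessed. A self-contained sketch of the same idea in plain Python (this `LazyAttr` helper is illustrative, not the transformers API):

import importlib


class LazyAttr:
    # Resolve "module.attr" only on first use.
    def __init__(self, module_name: str, attr: str) -> None:
        self._module_name = module_name
        self._attr = attr
        self._obj = None

    def resolve(self):
        if self._obj is None:
            module = importlib.import_module(self._module_name)
            self._obj = getattr(module, self._attr)
        return self._obj


# "json" is only imported when resolve() is first called.
lazy_dumps = LazyAttr("json", "dumps")
print(lazy_dumps.resolve()({"a": 1}))  # {"a": 1}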
(code_codestyle: 55)
import numpy as np

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class PolybiusCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        # 1-based (row, column) coordinates of the letter in the square.
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        # Collect row coordinates in the first row, column coordinates in the second.
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        # Flatten (rows first, then columns) and re-pair into letters.
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")

        # Interleave the coordinates of the ciphertext letters ...
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        # ... then split back into a row sequence and a column sequence.
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
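
A round-trip usage sketch (the expected strings were worked out by hand from the square above; illustrative only):

cipher = PolybiusCipher()
encoded = cipher.encode("test message")  # "qtltbdxrxlk"
decoded = cipher.decode(encoded)         # "testmessage" (spaces dropped, "j" folds into "i")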
(style_context_codestyle: 55, label: 1)
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    # A string can be rearranged into a palindrome iff at most one character
    # occurs an odd number of times.
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1

    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
(code_codestyle: 279)
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging

logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}


class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape


class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
(style_context_codestyle: 256, label: 0)
def solution(limit: int = 1_000_000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(f"{solution() = }")
(code_codestyle: 392)
from collections import namedtuple

from_to = namedtuple("from_to", "from_ to")

# Each entry maps a unit to (factor into cubic metres, factor out of cubic metres).
METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
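
Two worked conversions against the table above (illustrative):

volume_conversion(4, "cubicmeter", "litre")  # 4 * 1 * 1000 = 4000
volume_conversion(1, "litre", "gallon")      # 1 * 0.001 * 264.172 ~ 0.264172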
(style_context_codestyle: 392, label: 1)
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs

if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
(code_codestyle: 74)
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_informer": [
"INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
"INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"InformerForPrediction",
"InformerModel",
"InformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
(style_context_codestyle: 591, label: 0)
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging

if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: int = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")

        if crop_pct is not None:
            # Resize so that a subsequent center crop of size / crop_pct recovers `size`.
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: int = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
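
A minimal usage sketch for the pipeline above (illustrative; with the defaults — resize to 224/0.9 on the short side, then center crop — a single image comes back as one channel-first 224x224 tensor):

import numpy as np
from transformers import PoolFormerImageProcessor

processor = PoolFormerImageProcessor()
image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # toy HWC image
batch = processor.preprocess(image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)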
(code_codestyle: 307)
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32_000,
        d_model=1_024,
        n_layer=24,
        n_head=16,
        d_inner=4_096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
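
A quick illustration of the `attribute_map` aliasing above (assumes transformers is installed; `XLNetConfig` is the class defined here):

from transformers import XLNetConfig

config = XLNetConfig()
print(config.d_model)      # 1024
print(config.hidden_size)  # also 1024, resolved through attribute_map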
(style_context_codestyle: 307, label: 1)
from __future__ import annotations

from collections.abc import Iterator


class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
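
Usage sketch: sum all node values by iterating the wrapper (toy tree, illustrative):

root = Node(10)
root.left = Node(5)
root.right = Node(-3)
print(list(BinaryTreeNodeSum(root)))  # [12]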
(code_codestyle: 105)
import warnings

from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum

from ...processing_utils import ProcessorMixin

if is_torch_available():
    import torch


class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]

    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        # For every sample, keep the decoding head with the highest confidence.
        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            # Confidence is the cumulative product of per-step max probabilities.
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
(style_context_codestyle: 479, label: 0)
import unittest

import torch

from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow

torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
(code_codestyle: 675)
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch

TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
(style_context_codestyle: 675, label: 1)
import importlib

import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel


def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec
def get_obj_from_str(string, reload=False):
    module_name, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module_name)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module_name, package=None), cls)
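# For example, get_obj_from_str("collections.OrderedDict") returns the OrderedDict class;
# this is how instantiate_from_config below resolves the "target" key of a config.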
def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional

import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm

import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy


logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    """Arguments pertaining to which model we are going to fine-tune from."""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    """Arguments pertaining to the data used for training and evaluation."""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "The name of the task to train on."}
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    """Training arguments for the self-training algorithm."""

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "steps", "epoch"]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={"help": "Confidence threshold for pseudo-labeled data filtering."}
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100, metadata={"help": "Maximum number of self-training iterations."}
    )
    seed: Optional[int] = dataclasses.field(
        default=None, metadata={"help": "Random seed for initialization."}
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo-labeled data for the next self-training iteration."""
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        # 0.0 <= args.confidence_threshold <= 1.0
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Self-train a pre-trained model on a downstream task."""
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)

    args = argparse.Namespace()
    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)
    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
        os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False

    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
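# Hypothetical invocation (file names are placeholders; keyword names come from the dataclasses above):
# selftrain(
#     model_name_or_path="bert-base-uncased",
#     train_file="train.csv",
#     infer_file="infer.csv",
#     output_dir="self-train-output",
#     evaluation_strategy="steps",
#     eval_file="eval.csv",
#     max_selftrain_iterations=10,
# )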
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into SentencePiece sub-word tokens."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
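# Usage sketch (checkpoint taken from the vocab map above):
# tokenizer = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
# tokenizer.tokenize("Hello world")  # -> SentencePiece sub-word tokens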
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            # wrap around the alphabet
            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            # non-letters pass through unchanged and do not advance the key
            translated.append(symbol)
    return "".join(translated)
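# Worked example (verifiable by hand against LETTERS above):
# encrypt_message("LION", "Attack at dawn") -> "Lbhnns og oika"
# decrypt_message("LION", "Lbhnns og oika") -> "Attack at dawn"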
if __name__ == "__main__":
main()
from math import asin, atan, cos, radians, sin, sqrt, tan

AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
RADIUS = 6_378_137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Great-circle distance in metres between two points, per the WGS84 constants above."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
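# Rough check: San Francisco (37.774856, -122.424227) to New York (40.713019, -74.012647)
# comes out to approximately 4.15e6 metres (~4,152 km).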
if __name__ == "__main__":
import doctest
doctest.testmod()
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
import numpy


class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(self.input_array.shape[1], 4)
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        """Propagate the input through both hidden layers and return the output activation."""
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        """Update all three weight matrices by gradient descent on the squared error."""
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Logistic sigmoid activation."""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of the sigmoid, expressed in terms of the sigmoid's output value."""
    return (value) * (1 - (value))
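# Quick check: sigmoid(0) == 0.5, and since sigmoid_derivative takes the *activation*
# (not the pre-activation) as its input, sigmoid_derivative(0.5) == 0.25.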
def example() -> int:
    # Input values (a three-bit truth table).
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
from ...processing_utils import ProcessorMixin


class WhisperProcessor(ProcessorMixin):
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
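# Minimal usage sketch (the checkpoint name is illustrative; any Whisper checkpoint works):
# processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
# inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
# text_ids = processor(text="a transcript")["input_ids"]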
import os
import tempfile
import unittest

from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        DistilBertForMaskedLM,
        DistilBertForMultipleChoice,
        DistilBertForQuestionAnswering,
        DistilBertForSequenceClassification,
        DistilBertForTokenClassification,
        DistilBertModel,
    )


class DistilBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
A = importlib.util.find_spec("""s3fs""") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
A = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def UpperCamelCase_ ( lowerCamelCase : str ) -> str:
"""simple docstring"""
if "://" in dataset_path:
__magic_name__ : int = dataset_path.split('''://''' )[1]
return dataset_path
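# e.g. extract_path_from_uri("s3://my-bucket/datasets/train") -> "my-bucket/datasets/train";
# a plain local path without a protocol prefix is returned unchanged.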
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Check whether the filesystem uses a remote (non-local) protocol."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _UpperCamelCase ( metaclass=lowerCamelCase__ ):
"""simple docstring"""
snake_case_ = ['note_seq']
def __init__( self : List[Any] , *snake_case : Any , **snake_case : Any ) -> Any:
'''simple docstring'''
requires_backends(self , ['''note_seq'''] )
@classmethod
def _UpperCAmelCase ( cls : List[str] , *snake_case : Optional[int] , **snake_case : str ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''note_seq'''] )
@classmethod
def _UpperCAmelCase ( cls : Optional[int] , *snake_case : Optional[int] , **snake_case : Tuple ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''note_seq'''] )
import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class CPMAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens

        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)

        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}


if TYPE_CHECKING:
    from .tokenization_bertweet import BertweetTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import numpy as np
def lowerCamelCase ( _snake_case ):
return (2 / (1 + np.exp(-2 * vector ))) - 1
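# Sanity check: tangent_hyperbolic(np.array([0.0, 1.0])) is approximately [0.0, 0.7616],
# matching np.tanh on the same input.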
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class a :
UpperCamelCase : int
UpperCamelCase : int
class a :
def __init__( self , UpperCamelCase_ ):
UpperCAmelCase__ : list[list[Edge]] = [[] for _ in range(UpperCamelCase_ )]
UpperCAmelCase__ : Union[str, Any] = size
def __getitem__( self , UpperCamelCase_ ):
return iter(self._graph[vertex] )
@property
def __snake_case ( self ):
return self._size
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
if weight not in (0, 1):
raise ValueError('Edge weight must be either 0 or 1.' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('Vertex indexes must be in [0; size).' )
self._graph[from_vertex].append(Edge(UpperCamelCase_ , UpperCamelCase_ ) )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
UpperCAmelCase__ : List[str] = deque([start_vertex] )
UpperCAmelCase__ : list[int | None] = [None] * self.size
UpperCAmelCase__ : List[str] = 0
while queue:
UpperCAmelCase__ : Dict = queue.popleft()
UpperCAmelCase__ : Optional[Any] = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
UpperCAmelCase__ : Optional[int] = current_distance + edge.weight
UpperCAmelCase__ : Optional[Any] = distances[edge.destination_vertex]
if (
isinstance(UpperCamelCase_ , UpperCamelCase_ )
and new_distance >= dest_vertex_distance
):
continue
UpperCAmelCase__ : Dict = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('No path from start_vertex to finish_vertex.' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
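# Minimal usage sketch (the graph below is an illustrative assumption):
#   g = AdjacencyList(3)
#   g.add_edge(0, 1, 0)        # 0-weight edge
#   g.add_edge(1, 2, 1)        # 1-weight edge
#   g.get_shortest_path(0, 2)  # -> 1: the 0-weight edge is relaxed first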
| 254
| 1
|
from operator import delitem, getitem, setitem

import pytest

from data_structures.hashing.hash_map import HashMap


def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_matching_python_dict():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
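# A quick sketch of what _run_operation returns (behavior follows from the helpers above):
#   _run_operation({}, *_set("k", 1))  # -> (None, None): setitem returns None, no error
#   _run_operation({}, *_get("k"))     # -> (None, KeyError('k')): the exception is captured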
| 73
|
"""simple docstring"""
import string
def A_ ( snake_case_ : str ):
'''simple docstring'''
for key in range(len(string.ascii_uppercase ) ):
UpperCamelCase : Optional[int] = """"""
for symbol in message:
if symbol in string.ascii_uppercase:
UpperCamelCase : Optional[int] = string.ascii_uppercase.find(snake_case_ )
UpperCamelCase : str = num - key
if num < 0:
UpperCamelCase : Optional[int] = num + len(string.ascii_uppercase )
UpperCamelCase : Optional[int] = translated + string.ascii_uppercase[num]
else:
UpperCamelCase : List[str] = translated + symbol
print(f'Decryption using Key #{key}: {translated}' )
def A_ ( ):
'''simple docstring'''
UpperCamelCase : List[Any] = input("""Encrypted message: """ )
UpperCamelCase : str = message.upper()
decrypt(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
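# Example (ciphertext assumed: "HELLO" shifted by 3 gives "KHOOR"):
#   decrypt("KHOOR") prints 26 candidates, among them
#   "Decryption using Key #3: HELLO"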
| 499
| 0
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random nested float list with the given (batch, length) shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
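# Shape sketch (sizes illustrative): floats_list((2, 3)) returns a 2x3 nested list of
# uniform floats drawn from [0, scale), e.g. [[0.13, 0.84, 0.76], [0.25, 0.49, 0.44]].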
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
| 185
|
def hex_to_bin(hex_num: str) -> int:
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
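# Examples: hex_to_bin("AC") -> 10101100, hex_to_bin("-1a") -> -11010.
# Note the return value is a base-10 int whose decimal digits spell the binary expansion.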
| 185
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latent_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)

    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-2
    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
| 478
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available

from ..wav2vec2.test_feature_extraction_wav2vec2 import floats_list


if is_pyctcdecode_available():
    from huggingface_hub import snapshot_download
    from pyctcdecode import BeamSearchDecoderCTC

    from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
    from transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm import Wav2Vec2DecoderWithLMOutput

if is_torch_available():
    from transformers import Wav2Vec2ForCTC
@require_pyctcdecode
class Wav2Vec2ProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return Wav2Vec2CTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return Wav2Vec2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        processor.save_pretrained(self.tmpdirname)
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)

    def test_save_load_pretrained_additional_features(self):
        processor = Wav2Vec2ProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)

        # override the language-model hyperparameters when re-loading
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)

    def test_load_decoder_tokenizer_mismatch_content(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            Wav2Vec2ProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )
    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)

    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)

        decoded_processor = processor.decode(logits)
        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)
    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)
    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0

        decoded_processor_out = processor.batch_decode(
            logits,
            beam_width=beam_width,
            beam_prune_logp=beam_prune_logp,
            token_min_logp=token_min_logp,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
                beam_width=beam_width,
                beam_prune_logp=beam_prune_logp,
                token_min_logp=token_min_logp,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor)

        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))

        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))

    def test_decoder_with_params_of_lm(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True

        decoded_processor_out = processor.batch_decode(
            logits,
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)
        decoder.reset_params(
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor)

        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, True)
    def test_decoder_download_ignores_files(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ["alphabet.json", "language_model"]

        downloaded_decoder_files.sort()
        expected_decoder_files.sort()

        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)

    def test_decoder_local_files(self):
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm")

        processor = Wav2Vec2ProcessorWithLM.from_pretrained(local_dir)

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_cached_dir)

        local_decoder_files.sort()
        expected_decoder_files.sort()

        # test that both decoder from hub and local files in cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)

    def test_processor_from_auto_processor(self):
        processor_wav2vec2 = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")

        raw_speech = floats_list((3, 1000))

        input_wav2vec2 = processor_wav2vec2(raw_speech, return_tensors="np")
        input_auto = processor_auto(raw_speech, return_tensors="np")

        for key in input_wav2vec2.keys():
            self.assertAlmostEqual(input_wav2vec2[key].sum(), input_auto[key].sum(), delta=1e-2)

        logits = self._get_dummy_logits()

        decoded_wav2vec2 = processor_wav2vec2.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)

        self.assertListEqual(decoded_wav2vec2.text, decoded_auto.text)
    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )

    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets_integration_fast(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]

        outputs = processor.decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])

    def test_offsets_integration_fast_batch(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()

        outputs = processor.batch_decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])
    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16000))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
| 65
| 0
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = ''' def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
        link_unchanged_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )
        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 704
|
import argparse
import hashlib
import io
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict
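# Key-renaming sketch (the input key below is an assumed example, not from a real checkpoint):
#   "encoder.blocks.0.mlp.0.weight"
#   -> "encoder.layers.0.mlp.0.weight"   (after the 'blocks' -> 'layers' rule)
#   -> "encoder.layers.0.fc1.weight"     (after the 'mlp.0' -> 'fc1' rule)
# Rules are applied in dict order, each substring match rewriting new_key in place.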
def make_linear_from_emb(emb):
    """Build an output projection whose weights are tied to the embedding matrix."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = ".") -> bytes:
    """Download a checkpoint into `root` and verify its SHA256 checksum.

    `root` defaults to the working directory so the one-argument call in
    `convert_openai_whisper_to_tfms` below works.
    """
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")
    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Convert an OpenAI Whisper checkpoint into a Transformers WhisperForConditionalGeneration."""
    if ".pt" not in checkpoint_path:
        model_bytes = _download(_MODELS[checkpoint_path])
        original_checkpoint = torch.load(io.BytesIO(model_bytes), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],  # was "n_text_state", which is the hidden size, not the head count
        max_source_positions=dimensions["n_audio_ctx"],
    )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
_lowercase = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
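# Example invocation (hypothetical file names; the checkpoint name must be one of the
# _MODELS keys above or a path to a local .pt checkpoint):
#   python convert_openai_to_hf.py --checkpoint_path tiny.en --pytorch_dump_folder_path ./whisper-tiny-en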
| 96
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
logger = logging.get_logger(__name__)
class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 308
|
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize this instance to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 300
| 0
|
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their links from a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact; the archive URL only works after following the redirect."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files in `artifact_dir`."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors
def reduce_by_error(logs, error_filter=None):
    """Count each error and collect the tests in which it occurred."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        model = test.split("/")[2]
    else:
        model = None

    return model
def reduce_by_model(logs, error_filter=None):
    """Group errors by model and count them per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    """Render the error counts as a GitHub-flavored markdown table."""
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)

    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    """Render per-model error counts as a GitHub-flavored markdown table."""
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)

    return "\n".join(lines)
if __name__ == "__main__":
A__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
A__ : List[Any] = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
A__ : Any = get_job_links(args.workflow_run_id, token=args.token)
A__ : Dict = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
A__ : int = k.find(' / ')
A__ : Optional[Any] = k[index + len(' / ') :]
A__ : Any = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
A__ : Optional[int] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
A__ : int = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
A__ : str = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
A__ : Optional[int] = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
A__ : Dict = reduce_by_error(errors)
A__ : List[str] = reduce_by_model(errors)
A__ : Any = make_github_table(reduced_by_error)
A__ : Dict = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
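# Example invocation (hypothetical run id and token; assumes this file is saved as
# get_ci_error_statistics.py):
#   python get_ci_error_statistics.py --workflow_run_id 1234567890 --output_dir ./ci_reports --token <github_token>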
| 671
|
from itertools import count
def solution(min_block_length: int = 50) -> int:
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n
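# The dynamic program above counts, for each row length n, the fillings made of
# blocks of length >= min_block_length separated by at least one empty square;
# solution() returns the first n whose count exceeds one million (Project Euler 115).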
if __name__ == "__main__":
print(F"{solution() = }")
| 671
| 1
|
def binary_multiply(a: int, b: int) -> int:
    """Multiply a and b using only additions, doublings and bit shifts."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Multiply a and b modulo `modulus`, keeping intermediate values reduced."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
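# Quick sanity checks (13 * 11 = 143; 143 mod 7 = 3):
#   binary_multiply(13, 11) == 143
#   binary_mod_multiply(13, 11, 7) == 3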
| 241
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp: int) -> bool:
    """Check whether the codepoint `cp` falls in a CJK Unicode block."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False


def is_chinese(word: str) -> int:
    """Return 1 if every character of `word` is a CJK character, else 0."""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    """Collect the multi-character Chinese words appearing in `tokens`."""
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    """Prefix the non-initial pieces of known whole words with '##'."""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_len = min(end - start, max_word_len)
            for i in range(max_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    """Build, for every line, the positions of BERT sub-tokens that continue a whole Chinese word."""
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    lines = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(lines, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
    args = parser.parse_args()
    main(args)
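# Example invocation (all flags are optional and default to the ./resources paths above):
#   python prepare_chinese_ref.py --file_name ./resources/chinese-demo.txt --save_path ./resources/ref.txt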
| 241
| 1
|
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves needed so every node holds exactly one coin."""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result, excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
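# Quick example: a root holding 3 coins with two empty children needs two moves
# (one coin sent to each child):
#   distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0))) == 2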
| 702
|
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
| 316
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
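# Usage sketch: Data2VecTextConfig() reproduces the base 12-layer, 768-hidden setup;
# pass overrides such as Data2VecTextConfig(num_hidden_layers=6) for smaller variants.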
| 146
|
"""simple docstring"""
def get_set_bits_count(number: int) -> int:
    """Count set bits using Brian Kernighan's trick (clears the lowest set bit each pass)."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
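# Example: 25 is 0b11001, so get_set_bits_count(25) == 3.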
| 65
| 0
|
'''simple docstring'''
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: try every 3-permutation of the array."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Two-pointer scan over the sorted array."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
| 717
|
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """Bezier curve over a set of 2-D control points, via Bernstein basis polynomials."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 381
| 0
|
from __future__ import annotations
import requests
def get_hackernews_story(story_id: int) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top `max_stories` posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 276
|
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

CONTROL_CODES = {
'''Pregnancy''': 168_629,
'''Christianity''': 7_675,
'''Explain''': 106_423,
'''Fitness''': 63_440,
'''Saving''': 63_163,
'''Ask''': 27_171,
'''Ass''': 95_985,
'''Joke''': 163_509,
'''Questions''': 45_622,
'''Thoughts''': 49_605,
'''Retail''': 52_342,
'''Feminism''': 164_338,
'''Writing''': 11_992,
'''Atheism''': 192_263,
'''Netflix''': 48_616,
'''Computing''': 39_639,
'''Opinion''': 43_213,
'''Alone''': 44_967,
'''Funny''': 58_917,
'''Gaming''': 40_358,
'''Human''': 4_088,
'''India''': 1_331,
'''Joker''': 77_138,
'''Diet''': 36_206,
'''Legal''': 11_859,
'''Norman''': 4_939,
'''Tip''': 72_689,
'''Weight''': 52_343,
'''Movies''': 46_273,
'''Running''': 23_425,
'''Science''': 2_090,
'''Horror''': 37_793,
'''Confession''': 60_572,
'''Finance''': 12_250,
'''Politics''': 16_360,
'''Scary''': 191_985,
'''Support''': 12_654,
'''Technologies''': 32_516,
'''Teenage''': 66_160,
'''Event''': 32_769,
'''Learned''': 67_460,
'''Notion''': 182_770,
'''Wikipedia''': 37_583,
'''Books''': 6_665,
'''Extract''': 76_050,
'''Confessions''': 102_701,
'''Conspiracy''': 75_932,
'''Links''': 63_674,
'''Narcissus''': 150_425,
'''Relationship''': 54_766,
'''Relationships''': 134_796,
'''Reviews''': 41_671,
'''News''': 4_256,
'''Translation''': 26_820,
'''multilingual''': 128_406,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word.

    Word is represented as a tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
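# For example, get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}.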
class CTRLTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 276
| 1
|
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 16000,
        padding_value: float = 0.0,
        do_normalize: bool = False,
        num_mel_bins: int = 80,
        hop_length: int = 16,
        win_length: int = 64,
        win_function: str = "hann_window",
        frame_signal_scale: float = 1.0,
        fmin: float = 80,
        fmax: float = 7600,
        mel_floor: float = 1e-10,
        reduction_factor: int = 2,
        return_attention_mask: bool = True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def _extract_mel_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """Extract log-mel filterbank features for one (unbatched) waveform."""
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T

    def __call__(
        self,
        audio: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        audio_target: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio,
                False,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target,
                True,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def _process_audio(
        self,
        speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        is_target: bool = False,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(s, dtype=np.float32) for s in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
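# Minimal usage sketch (`waveform` is a hypothetical 1-D float32 array sampled at 16 kHz):
#   extractor = SpeechT5FeatureExtractor()
#   inputs = extractor(audio=waveform, sampling_rate=16000, return_tensors="np")
#   log_mel = extractor(audio_target=waveform, sampling_rate=16000)["input_values"]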
| 709
|
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
__a = []
__a = {
B'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
B'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
B'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
B'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
B'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
B'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
B'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
B'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(10):
__a = ord(str(i))
def get_raw_chars():
    """Gets raw characters from inputs."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Gets a character from the keyboard and returns the key code."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 300
| 0
|
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [size, number, step] of the convolution kernel
        :param size_p1: pooling size
        :param bp_num1: units number of flatten layer
        :param bp_num2: units number of hidden layer
        :param bp_num3: units number of output layer
        :param rate_w: rate of weight learning
        :param rate_t: rate of threshold learning
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1
    def save_model(self, save_path):
        # save model dict with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)

        print(f"Model saved: {save_path}")
    @classmethod
    def read_model(cls, model_path):
        # read saved model
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        rate_w = model_dic.get("rate_weight")
        rate_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, rate_w, rate_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins
    def sig(self, x):
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)
    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map
            )
            data_featuremap.append(featuremap)

        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="average_pool" ) -> Any:
# pooling process
_lowerCAmelCase = len(featuremaps[0] )
_lowerCAmelCase = int(size_map / size_pooling )
_lowerCAmelCase = []
for i_map in range(len(_lowerCAmelCase ) ):
_lowerCAmelCase = featuremaps[i_map]
_lowerCAmelCase = []
for i_focus in range(0 , _lowerCAmelCase , _lowerCAmelCase ):
for j_focus in range(0 , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(_lowerCAmelCase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(_lowerCAmelCase ) )
_lowerCAmelCase = np.asmatrix(_lowerCAmelCase ).reshape(_lowerCAmelCase , _lowerCAmelCase )
featuremap_pooled.append(_lowerCAmelCase )
return featuremap_pooled
def _snake_case ( self , _lowerCAmelCase ) -> List[str]:
# expanding three dimension data to one dimension list
_lowerCAmelCase = []
for i in range(len(_lowerCAmelCase ) ):
_lowerCAmelCase = np.shape(data[i] )
_lowerCAmelCase = data[i].reshape(1 , shapes[0] * shapes[1] )
_lowerCAmelCase = data_listed.getA().tolist()[0]
data_expanded.extend(_lowerCAmelCase )
_lowerCAmelCase = np.asarray(_lowerCAmelCase )
return data_expanded
def _snake_case ( self , _lowerCAmelCase ) -> Any:
# expanding matrix to one dimension list
_lowerCAmelCase = np.asarray(_lowerCAmelCase )
_lowerCAmelCase = np.shape(_lowerCAmelCase )
_lowerCAmelCase = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
_lowerCAmelCase = []
_lowerCAmelCase = 0
for i_map in range(_lowerCAmelCase ):
_lowerCAmelCase = np.ones((size_map, size_map) )
for i in range(0 , _lowerCAmelCase , _lowerCAmelCase ):
for j in range(0 , _lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = pd_pool[
i_pool
]
_lowerCAmelCase = i_pool + 1
_lowerCAmelCase = np.multiply(
_lowerCAmelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(_lowerCAmelCase )
return pd_all
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=bool ) -> Tuple:
# model traning
print("----------------------Start Training-------------------------" )
print((" - - Shape: Train_Data ", np.shape(_lowerCAmelCase )) )
print((" - - Shape: Teach_Data ", np.shape(_lowerCAmelCase )) )
_lowerCAmelCase = 0
_lowerCAmelCase = []
_lowerCAmelCase = 10000
while rp < n_repeat and mse >= error_accuracy:
_lowerCAmelCase = 0
print(f'''-------------Learning Time {rp}--------------''' )
for p in range(len(_lowerCAmelCase ) ):
# print('------------Learning Image: %d--------------'%p)
_lowerCAmelCase = np.asmatrix(datas_train[p] )
_lowerCAmelCase = np.asarray(datas_teach[p] )
_lowerCAmelCase , _lowerCAmelCase = self.convolute(
_lowerCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_lowerCAmelCase = self.pooling(_lowerCAmelCase , self.size_poolinga )
_lowerCAmelCase = np.shape(_lowerCAmelCase )
_lowerCAmelCase = self._expand(_lowerCAmelCase )
_lowerCAmelCase = data_bp_input
_lowerCAmelCase = np.dot(_lowerCAmelCase , self.vji.T ) - self.thre_bpa
_lowerCAmelCase = self.sig(_lowerCAmelCase )
_lowerCAmelCase = np.dot(_lowerCAmelCase , self.wkj.T ) - self.thre_bpa
_lowerCAmelCase = self.sig(_lowerCAmelCase )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
_lowerCAmelCase = np.multiply(
(data_teach - bp_outa) , np.multiply(_lowerCAmelCase , (1 - bp_outa) ) )
_lowerCAmelCase = np.multiply(
np.dot(_lowerCAmelCase , self.wkj ) , np.multiply(_lowerCAmelCase , (1 - bp_outa) ) )
_lowerCAmelCase = np.dot(_lowerCAmelCase , self.vji )
_lowerCAmelCase = pd_i_all / (self.size_poolinga * self.size_poolinga)
_lowerCAmelCase = pd_conva_pooled.T.getA().tolist()
_lowerCAmelCase = self._calculate_gradient_from_pool(
_lowerCAmelCase , _lowerCAmelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
_lowerCAmelCase = self._expand_mat(pd_conva_all[k_conv] )
_lowerCAmelCase = self.rate_weight * np.dot(_lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
_lowerCAmelCase = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
_lowerCAmelCase = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
_lowerCAmelCase = self.vji + pd_j_all.T * bp_outa * self.rate_weight
_lowerCAmelCase = self.thre_bpa - pd_k_all * self.rate_thre
_lowerCAmelCase = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
_lowerCAmelCase = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
_lowerCAmelCase = rp + 1
_lowerCAmelCase = error_count / patterns
all_mse.append(_lowerCAmelCase )
def draw_error():
_lowerCAmelCase = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(_lowerCAmelCase , "+-" )
plt.plot(_lowerCAmelCase , "r--" )
plt.xlabel("Learning Times" )
plt.ylabel("All_mse" )
plt.grid(_lowerCAmelCase , alpha=0.5 )
plt.show()
print("------------------Training Complished---------------------" )
print((" - - Training epoch: ", rp, f''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def _snake_case ( self , _lowerCAmelCase ) -> Optional[Any]:
# model predict
_lowerCAmelCase = []
print("-------------------Start Testing-------------------------" )
print((" - - Shape: Test_Data ", np.shape(_lowerCAmelCase )) )
for p in range(len(_lowerCAmelCase ) ):
_lowerCAmelCase = np.asmatrix(datas_test[p] )
_lowerCAmelCase , _lowerCAmelCase = self.convolute(
_lowerCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_lowerCAmelCase = self.pooling(_lowerCAmelCase , self.size_poolinga )
_lowerCAmelCase = self._expand(_lowerCAmelCase )
_lowerCAmelCase = data_bp_input
_lowerCAmelCase = bp_outa * self.vji.T - self.thre_bpa
_lowerCAmelCase = self.sig(_lowerCAmelCase )
_lowerCAmelCase = bp_outa * self.wkj.T - self.thre_bpa
_lowerCAmelCase = self.sig(_lowerCAmelCase )
produce_out.extend(bp_outa.getA().tolist() )
_lowerCAmelCase = [list(map(self.do_round , _lowerCAmelCase ) ) for each in produce_out]
return np.asarray(_lowerCAmelCase )
def _snake_case ( self , _lowerCAmelCase ) -> Tuple:
# return the data of image after convoluting process so we can check it out
_lowerCAmelCase = np.asmatrix(_lowerCAmelCase )
_lowerCAmelCase , _lowerCAmelCase = self.convolute(
_lowerCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_lowerCAmelCase = self.pooling(_lowerCAmelCase , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
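    # Minimal smoke test (a sketch, not part of the original file; shapes are assumptions):
    # two 3x3 kernels with step 1 on a 20x20 input give 18x18 feature maps; 2x2 average
    # pooling gives 9x9 maps, so the flatten layer needs 9 * 9 * 2 = 162 units.
    cnn = CNN([3, 2, 1], 2, 162, 20, 3, rate_w=0.2, rate_t=0.2)
    train_x = [np.random.rand(20, 20)]
    train_y = [np.random.rand(3)]
    cnn.train(1, train_x, train_y, n_repeat=2, error_accuracy=0.1, draw_e=False)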
"""Collect and summarize test errors from GitHub Actions CI artifacts."""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter

import requests


def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Action artifact zip archive to `output_dir`."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result


def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files in `artifact_dir`."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors


def reduce_by_error(logs, error_filter=None):
    """Count the occurrences of each error, with the failing tests attached."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    """Get the model name from a test method path like `tests/models/albert/...`."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        model = test.split("/")[2]
    else:
        model = None
    return model


def reduce_by_model(logs, error_filter=None):
    """Count the occurrences of each error per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
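# Example invocation (a sketch; the run id and token are placeholders, not real values):
#     python get_ci_error_statistics.py --workflow_run_id 1234567890 \
#         --output_dir ci_reports --token <GH_TOKEN>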
"""Tests for the Project Euler problem 54 poker hand solution."""
import os
from itertools import chain
from random import randrange, shuffle

import pytest

from .sol1 import PokerHand
SORTED_HANDS = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
POKER_HANDS = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
TEST_FLUSH = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
TEST_STRAIGHT = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
    ("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
    ("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
    ("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
    ("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
    ("JH AH TH KH QH", 0),
    ("JH 9H TH KH QH", 0),
    ("JC KH JS JD JH", 7),
    ("KH KC 3S 3H 3D", 6),
    ("8C 9C 5C 3C TC", 0),
    ("JS QS 9H TS KH", 0),
    ("7C 7S KH 2H 7H", 3),
    ("3C KH 5D 5S KH", 2),
    ("QH 8H KD JH 8S", 1),
    ("2D 6D 9D TH 7D", 0),
)
TEST_TYPES = (
    ("JH AH TH KH QH", 23),
    ("JH 9H TH KH QH", 22),
    ("JC KH JS JD JH", 21),
    ("KH KC 3S 3H 3D", 20),
    ("8C 9C 5C 3C TC", 19),
    ("JS QS 9H TS KH", 18),
    ("7C 7S KH 2H 7H", 17),
    ("3C KH 5D 5S KH", 16),
    ("QH 8H KD JH 8S", 15),
    ("2D 6D 9D TH 7D", 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", POKER_HANDS)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
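# Running these tests (a sketch of assumed layout, not from the original file):
#     pytest test_poker_hand.py
# with sol1.py (defining PokerHand) and poker_hands.txt alongside this module.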
"""Fetch the reports of the last scheduled (daily) CI run."""
import os
import zipfile

import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI.

    Only the runs triggered by the `schedule` event on the `main` branch are selected.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the last completed workflow run id of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(workflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the content of the requested artifacts of the last completed daily CI run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
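# Usage sketch (the artifact name and token are illustrative placeholders):
#     reports = get_last_daily_ci_reports(
#         ["test_reports"], "ci_reports", token=os.environ.get("GH_TOKEN")
#     )
#     # `reports` maps artifact name -> {filename: file content}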
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
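# Effect of the lazy structure above (illustrative, not part of this file): an import such as
#     from transformers.models.canine import CanineTokenizer
# resolves through `_LazyModule`, so the torch-dependent modeling file is only
# loaded when one of its attributes is first accessed.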
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    datasets_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(datasets_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
def gray_code(bit_count: int) -> list:
    """
    Takes in an integer n and returns an n-bit gray code sequence as integers.
    """
    if bit_count < 0:
        raise ValueError("The given input must be non-negative")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert the bit strings to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """
    Returns the n-bit gray code sequence as a list of bit strings.

    The approach is recursive: build the n-bit sequence from the (n-1)-bit one
    by prefixing "0" to its first half and "1" to its reversed second half.
    """
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # the recursive call generates the answer for n - 1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to the first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])

    # append 1 to the second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])

    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
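    # Sample outputs (a quick sanity check, not from the original file):
    #     gray_code(2) -> [0, 1, 3, 2]
    #     gray_code(3) -> [0, 1, 3, 2, 6, 7, 5, 4]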
def solution(limit: int = 1000000) -> int:
    """
    Project Euler 72: count the reduced proper fractions with denominator <= limit,
    i.e. the sum of Euler's totient phi(d) for 2 <= d <= limit, computed with a sieve.
    """
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so adjust all of its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
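    # Sanity check (a sketch, not from the original file): the Project Euler 72
    # statement counts 21 reduced proper fractions for denominators d <= 8.
    assert solution(8) == 21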
"""PyTorch learning-rate schedulers with optional warmup."""
import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging

logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate preceded by a linear warmup."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise constant schedule, e.g. "1:10,0.1:20,0.01:30,0.005" (multiplier:until_step, ..., final)."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, lr_str = rule_str.split(":")
        steps = int(value_str)
        lr_multiple = float(lr_str)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by linear decay to 0."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by cosine decay."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup followed by cosine decay with several hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup followed by polynomial decay from the initial lr to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified entry point that dispatches to the scheduler named by `name`."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
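# Minimal usage sketch (assumes a torch model and optimizer; not part of the original file):
#     optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
#     lr_scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=100, num_training_steps=1000)
#     for step in range(1000):
#         ...  # forward/backward, optimizer.step(), optimizer.zero_grad()
#         lr_scheduler.step()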
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """
    Calculate inductive reactance, frequency or inductance from two given
    electrical properties, with the remaining argument passed as 0.
    """
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
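    # Example (a sketch, not from the original file): a 10 mH inductor at 1 kHz
    # has reactance 2 * pi * 1000 * 0.01 ~ 62.83 ohms.
    print(ind_reactance(0.01, 1000, 0))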
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """
    The function run by the processes that performs the parallel swaps.

    position  = the index in the list this process represents
    value     = the initial value at list[position]
    l_send, r_send = the pipes used to send to the left/right neighbor
    lr_cv, rr_cv   = the pipes used to receive from the left/right neighbor
    result_pipe    = the pipe used to send the result back to main
    """
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    """Create the processes and run the odd-even transposition sort in parallel."""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_ls = temp_rs
    temp_lr = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_ls = temp_rs
        temp_lr = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    """Odd-even transposition sort on a reversed list of 10 numbers."""
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)

if __name__ == "__main__":
main()
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # can't train a new tokenizer via the Tokenizers lib
    def test_training_new_tokenizer(self):
        pass

    # can't train a new tokenizer via the Tokenizers lib
    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check that ``n`` is not already used in the given row, column, or 3x3 box."""
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty cell, or ``None`` when the grid is full."""
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place by backtracking; return it, or ``None`` if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix) -> None:
    """Print a grid row by row."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
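# Minimal usage sketch (not part of the original file):
#     config = MarkupLMConfig(max_depth=32)  # override any default above
#     print(config.model_type)               # -> "markuplm"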
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class UpperCAmelCase_ :
def __init__( self , a , a=1_3 , a=3_2 , a=2 , a=3 , a=1_6 , a=[1, 2, 1] , a=[2, 2, 4] , a=2 , a=2.0 , a=True , a=0.0 , a=0.0 , a=0.1 , a="gelu" , a=False , a=True , a=0.02 , a=1e-5 , a=True , a=None , a=True , a=1_0 , a=8 , a=["stage1", "stage2", "stage3"] , a=[1, 2, 3] , ) -> int:
lowercase__ : int = parent
lowercase__ : Union[str, Any] = batch_size
lowercase__ : Dict = image_size
lowercase__ : str = patch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : List[str] = embed_dim
lowercase__ : Any = depths
lowercase__ : Dict = num_heads
lowercase__ : List[str] = window_size
lowercase__ : int = mlp_ratio
lowercase__ : Tuple = qkv_bias
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Tuple = drop_path_rate
lowercase__ : List[str] = hidden_act
lowercase__ : Optional[Any] = use_absolute_embeddings
lowercase__ : Optional[Any] = patch_norm
lowercase__ : Any = layer_norm_eps
lowercase__ : List[Any] = initializer_range
lowercase__ : List[str] = is_training
lowercase__ : int = scope
lowercase__ : Optional[int] = use_labels
lowercase__ : List[Any] = type_sequence_label_size
lowercase__ : List[str] = encoder_stride
lowercase__ : Optional[Any] = out_features
lowercase__ : Dict = out_indices
def _UpperCAmelCase ( self ) -> Tuple:
lowercase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = None
if self.use_labels:
lowercase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _UpperCAmelCase ( self , a , a , a ) -> Dict:
lowercase__ : Tuple = MaskFormerSwinModel(config=a )
model.to(a )
model.eval()
lowercase__ : str = model(a )
lowercase__ : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]:
lowercase__ : List[Any] = MaskFormerSwinBackbone(config=a )
model.to(a )
model.eval()
lowercase__ : int = model(a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] )
# verify ValueError
with self.parent.assertRaises(a ):
lowercase__ : Dict = ['stem']
lowercase__ : List[str] = MaskFormerSwinBackbone(config=a )
def _UpperCAmelCase ( self ) -> str:
lowercase__ : int = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs
lowercase__ : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _a , _a , unittest.TestCase):
lowerCamelCase__ : Optional[int] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__ : List[str] = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
lowerCamelCase__ : str = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Any = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : int = False
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : str = MaskFormerSwinModelTester(self )
lowercase__ : Tuple = ConfigTester(self , config_class=a , embed_dim=3_7 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
def _UpperCAmelCase ( self ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self ) -> str:
return
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a )
    @unittest.skip('Swin does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip('Swin does not support feedforward chunking')
    def test_feed_forward_chunking(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions')
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason='MaskFormerSwin is only used as an internal backbone')
    def test_save_load_fast_init_to_base(self):
        pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, 'expected_num_hidden_layers', len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim])
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints')
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin')
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin')
    def test_initialization(self):
        pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (list, tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5),
                        msg=(
                            'Tuple and dict output are not equal. Difference:'
                            f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                            f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {'output_hidden_states': True})
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {'output_hidden_states': True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict['pixel_values'].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()
            outputs = backbone(**inputs_dict)
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
"""simple docstring"""
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
_DESCRIPTION = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
_KWARGS_DESCRIPTION = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
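# Illustrative values for the helpers above (assuming numpy array inputs):
#   simple_accuracy(np.array([0, 1, 1, 0]), np.array([0, 1, 0, 0]))  -> 0.75
#   acc_and_f1(np.array([0, 1, 1]), np.array([0, 1, 1]))             -> {"accuracy": 1.0, "f1": 1.0}
#   pearson_and_spearman([0.0, 1.0, 2.0], [0.0, 1.0, 2.0])           -> {"pearson": 1.0, "spearmanr": 1.0}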
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
    'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'ctrl': 256,
}
CONTROL_CODES = {
'Pregnancy': 168_629,
'Christianity': 7_675,
'Explain': 106_423,
'Fitness': 63_440,
'Saving': 63_163,
'Ask': 27_171,
'Ass': 95_985,
'Joke': 163_509,
'Questions': 45_622,
'Thoughts': 49_605,
'Retail': 52_342,
'Feminism': 164_338,
'Writing': 11_992,
'Atheism': 192_263,
'Netflix': 48_616,
'Computing': 39_639,
'Opinion': 43_213,
'Alone': 44_967,
'Funny': 58_917,
'Gaming': 40_358,
'Human': 4_088,
'India': 1_331,
'Joker': 77_138,
'Diet': 36_206,
'Legal': 11_859,
'Norman': 4_939,
'Tip': 72_689,
'Weight': 52_343,
'Movies': 46_273,
'Running': 23_425,
'Science': 2_090,
'Horror': 37_793,
'Confession': 60_572,
'Finance': 12_250,
'Politics': 16_360,
'Scary': 191_985,
'Support': 12_654,
'Technologies': 32_516,
'Teenage': 66_160,
'Event': 32_769,
'Learned': 67_460,
'Notion': 182_770,
'Wikipedia': 37_583,
'Books': 6_665,
'Extract': 76_050,
'Confessions': 102_701,
'Conspiracy': 75_932,
'Links': 63_674,
'Narcissus': 150_425,
'Relationship': 54_766,
'Relationships': 134_796,
'Reviews': 41_671,
'News': 4_256,
'Translation': 26_820,
'multilingual': 128_406,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
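# Illustrative call (values hypothetical):
#   get_pairs(("h", "e", "l", "l", "o</w>"))
#   -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o</w>")}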
class CTRLTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES
    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + '</w>'])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = '@@ '.join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
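    # Illustrative merge trace (hypothetical ranks): starting from
    # ("h", "e", "l", "l", "o</w>"), repeatedly merging the lowest-ranked
    # adjacent pair might yield ("he", "ll", "o</w>"), which joins to
    # "he@@ ll@@ o</w>" and is cached as "he@@ ll@@ o" once "</w>" is stripped.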
    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r'\S+\n?', text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = ' '.join(tokens).replace('@@ ', '').strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
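# Hedged usage sketch (assumes local `vocab.json` / `merges.txt` files, which
# are not part of this module):
#   tokenizer = CTRLTokenizer("vocab.json", "merges.txt")
#   tokens = tokenizer._tokenize("Diet tips that work")
#   text = tokenizer.convert_tokens_to_string(tokens)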
"""simple docstring"""
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Splits `x` into sentences and joins them with newlines."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
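# Example (assumes nltk's "punkt" model was downloaded above):
#   add_newline_to_end_of_each_sentence("Hello there. How are you?")
#   -> "Hello there.\nHow are you?"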
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std
        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = FunnelConfig(
            vocab_size=self.vocab_size, block_sizes=self.block_sizes, num_decoder_layers=self.num_decoder_layers, d_model=self.d_model, n_head=self.n_head, d_head=self.d_head, d_inner=self.d_inner, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, activation_dropout=self.activation_dropout, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_std=self.initializer_std, )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))
        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))
        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))
    def create_and_check_base_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))
        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))
        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))
    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
_snake_case : str = argparse.ArgumentParser(
description=(
"Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="bert", choices=["bert"])
parser.add_argument("--model_name", default="bert-base-uncased", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
_snake_case : Dict = parser.parse_args()
if args.model_type == "bert":
_snake_case : Dict = BertForMaskedLM.from_pretrained(args.model_name)
_snake_case : List[Any] = "bert"
else:
raise ValueError("args.model_type should be \"bert\".")
    state_dict = model.state_dict()
    compressed_sd = {}
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1
    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
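    # Layer mapping used above: teacher (BERT) layers [0, 2, 4, 7, 9, 11] are
    # copied into student layers [0, 1, 2, 3, 4, 5], so e.g. teacher layer 7
    # initializes student layer 3. (Student key names follow the DistilBERT
    # convention and are a reconstruction, not verified against a checkpoint.)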
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
_snake_case : Optional[int] = logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_timm_backbone''': ['''TimmBackboneConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_timm_backbone'''] = ['''TimmBackbone''']
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
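# With the lazy structure above, importing TimmBackbone from this package
# resolves through _LazyModule, so torch (and the modeling module) are only
# imported when the attribute is actually accessed, not at package import time.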
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = """trocr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """num_attention_heads""": """decoder_attention_heads""",
        """hidden_size""": """d_model""",
        """num_hidden_layers""": """decoder_layers""",
    }

    def __init__(self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
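# Minimal usage sketch (illustrative sizes, not tied to any released checkpoint):
#   config = TrOCRConfig(d_model=256, decoder_layers=2, decoder_attention_heads=4)
#   config.hidden_size         # -> 256, routed to d_model via attribute_map
#   config.num_hidden_layers   # -> 2,   routed to decoder_layers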
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
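# Example: a 2x2 grid with no obstacles has exactly two simple paths from the
# top-left to the bottom-right cell (revisits are blocked by `visit`):
#   depth_first_search([[0, 0], [0, 0]], 0, 0, set())  -> 2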
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model, ckpt_dir, model_name):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()

    def to_tf_var_name(name):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f'bert/{name}'

    def create_tf_var(tensor, name, session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f'Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}')
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir, )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
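# Illustrative name translation performed by to_tf_var_name (hypothetical key):
#   "encoder.layer.0.attention.self.query.weight"
#   -> "bert/encoder/layer_0/attention/self/query/kernel"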
"""simple docstring"""
def get_bound(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError('''Invalid value for min_val or max_val (min_value < max_value)''')
    return min_val if option else max_val


def get_avg(number_a: int, number_b: int) -> int:
    return int((number_a + number_b) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError('''argument value for lower and higher must be(lower > higher)''')
    if not lower < to_guess < higher:
        raise ValueError(
            '''guess value must be within the range of lower and higher value''')

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print('''started...''')
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f'''guess the number : {last_numbers[-1]}''')
    print(f'''details : {last_numbers!s}''')


def main() -> None:
    lower = int(input('''Enter lower value : ''').strip())
    higher = int(input('''Enter high value : ''').strip())
    guess = int(input('''Enter value to guess : ''').strip())
    guess_the_number(lower, higher, guess)
if __name__ == "__main__":
main()
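# Illustrative bisection trace for guess_the_number(0, 100, 37):
#   midpoints tried: 50 (high) -> 25 (low) -> 37 (same), so the final
#   printed guess is 37 after three iterations.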
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)
        # test with a raw waveform
        audio = np.zeros((34000,))
        audio2 = np.zeros((14000,))
        return audio_classifier, [audio2, audio]
    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {'''score''': ANY(float), '''label''': ANY(str)},
                {'''score''': ANY(float), '''label''': ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {'''score''': ANY(float), '''label''': ANY(str)},
            ],
        )
        self.run_torchaudio(audio_classifier)
    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''')
        audio = dataset[0]['''audio''']['''array''']
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {'''score''': ANY(float), '''label''': ANY(str)},
                {'''score''': ANY(float), '''label''': ANY(str)},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        model = '''anton-l/wav2vec2-random-tiny-classifier'''
        audio_classifier = pipeline('''audio-classification''', model=model)
        audio = np.ones((8000,))
        output = audio_classifier(audio, top_k=4)
        EXPECTED_OUTPUT = [
            {'''score''': 0.0842, '''label''': '''no'''},
            {'''score''': 0.0838, '''label''': '''up'''},
            {'''score''': 0.0837, '''label''': '''go'''},
            {'''score''': 0.0834, '''label''': '''right'''},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {'''score''': 0.0845, '''label''': '''stop'''},
            {'''score''': 0.0844, '''label''': '''on'''},
            {'''score''': 0.0841, '''label''': '''right'''},
            {'''score''': 0.0834, '''label''': '''left'''},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
        audio_dict = {'''array''': np.ones((8000,)), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = '''superb/wav2vec2-base-superb-ks'''
        audio_classifier = pipeline('''audio-classification''', model=model)
        dataset = datasets.load_dataset('''anton-l/superb_dummy''', '''ks''', split='''test''')
        audio = np.array(dataset[3]['''speech'''], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {'''score''': 0.981, '''label''': '''go'''},
                {'''score''': 0.007, '''label''': '''up'''},
                {'''score''': 0.006, '''label''': '''_unknown_'''},
                {'''score''': 0.001, '''label''': '''down'''},
            ],
        )
    @require_tf
    @unittest.skip('''Audio classification is not implemented for TF''')
    def test_small_model_tf(self):
        pass
'''simple docstring'''
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ['''XLA_PYTHON_CLIENT_MEM_FRACTION'''] = '''0.12''' # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape with values below vocab_size."""
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))
    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output
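# e.g. ids_tensor((2, 5), vocab_size=10) returns an int32 array of shape (2, 5)
# whose entries are drawn uniformly from [0, 9].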
def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs['''input_ids'''].shape[-1] // 2
        input_ids = inputs['''input_ids'''][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0
        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)
            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)
            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))
            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ :Union[str, Any] = self._get_input_ids_and_config()
UpperCAmelCase__ :List[str] = False
UpperCAmelCase__ :Dict = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ :Tuple = model_class(__lowerCamelCase )
UpperCAmelCase__ :List[str] = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
UpperCAmelCase__ :List[str] = jit(model.generate )
UpperCAmelCase__ :Union[str, Any] = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __SCREAMING_SNAKE_CASE ( self : str ):
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ :List[str] = self._get_input_ids_and_config()
UpperCAmelCase__ :Dict = True
UpperCAmelCase__ :List[str] = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ :List[str] = model_class(__lowerCamelCase )
UpperCAmelCase__ :Union[str, Any] = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
UpperCAmelCase__ :Tuple = jit(model.generate )
UpperCAmelCase__ :Optional[Any] = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __SCREAMING_SNAKE_CASE ( self : Any ):
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ :Optional[int] = self._get_input_ids_and_config()
UpperCAmelCase__ :Tuple = False
UpperCAmelCase__ :Union[str, Any] = max_length
UpperCAmelCase__ :List[Any] = 2
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ :Tuple = model_class(__lowerCamelCase )
UpperCAmelCase__ :str = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
UpperCAmelCase__ :Any = jit(model.generate )
UpperCAmelCase__ :Any = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ :Dict = self._get_input_ids_and_config()
UpperCAmelCase__ :Union[str, Any] = False
UpperCAmelCase__ :Tuple = max_length
UpperCAmelCase__ :Union[str, Any] = 2
UpperCAmelCase__ :Union[str, Any] = 2
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ :str = model_class(__lowerCamelCase )
UpperCAmelCase__ :List[Any] = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def __SCREAMING_SNAKE_CASE ( self : Any ):
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ :List[str] = self._get_input_ids_and_config()
UpperCAmelCase__ :Any = True
UpperCAmelCase__ :Union[str, Any] = max_length
UpperCAmelCase__ :Optional[Any] = 0.8
UpperCAmelCase__ :List[str] = 1_0
UpperCAmelCase__ :Dict = 0.3
UpperCAmelCase__ :Any = 1
UpperCAmelCase__ :Dict = 8
UpperCAmelCase__ :List[str] = 9
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ :List[Any] = model_class(__lowerCamelCase )
UpperCAmelCase__ :Tuple = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
UpperCAmelCase__ :Tuple = jit(model.generate )
UpperCAmelCase__ :Dict = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ :List[str] = self._get_input_ids_and_config()
UpperCAmelCase__ :List[Any] = max_length
UpperCAmelCase__ :List[str] = 1
UpperCAmelCase__ :Dict = 8
UpperCAmelCase__ :Tuple = 9
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ :Any = model_class(__lowerCamelCase )
UpperCAmelCase__ :Any = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
UpperCAmelCase__ :Any = jit(model.generate )
UpperCAmelCase__ :Union[str, Any] = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ :Tuple = self._get_input_ids_and_config()
UpperCAmelCase__ :str = max_length
UpperCAmelCase__ :str = 2
UpperCAmelCase__ :List[str] = 1
UpperCAmelCase__ :List[str] = 8
UpperCAmelCase__ :str = 9
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ :Tuple = model_class(__lowerCamelCase )
UpperCAmelCase__ :str = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
UpperCAmelCase__ :str = jit(model.generate )
UpperCAmelCase__ :List[Any] = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ :Any = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCAmelCase__ :List[str] = attention_mask.at[(0, 0)].set(0 )
UpperCAmelCase__ :List[str] = False
UpperCAmelCase__ :List[Any] = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ :str = model_class(__lowerCamelCase )
UpperCAmelCase__ :Optional[int] = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
UpperCAmelCase__ :Any = jit(model.generate )
UpperCAmelCase__ :Dict = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __SCREAMING_SNAKE_CASE ( self : int ):
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ :Dict = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCAmelCase__ :Tuple = attention_mask.at[(0, 0)].set(0 )
UpperCAmelCase__ :Optional[int] = True
UpperCAmelCase__ :Tuple = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ :Any = model_class(__lowerCamelCase )
UpperCAmelCase__ :List[str] = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
UpperCAmelCase__ :Optional[Any] = jit(model.generate )
UpperCAmelCase__ :Tuple = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ :Dict = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCAmelCase__ :int = attention_mask.at[(0, 0)].set(0 )
UpperCAmelCase__ :Union[str, Any] = 2
UpperCAmelCase__ :List[str] = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase__ :Union[str, Any] = model_class(__lowerCamelCase )
UpperCAmelCase__ :Any = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
UpperCAmelCase__ :List[Any] = jit(model.generate )
UpperCAmelCase__ :str = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
    model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
    encoder_input_str = "Hello world"
    input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids
    # typos are quickly detected (the correct argument is `do_sample`)
    with self.assertRaisesRegex(ValueError, "do_samples"):
        model.generate(input_ids, do_samples=True)
    # arbitrary arguments that will not be used anywhere are also not accepted
    with self.assertRaisesRegex(ValueError, "foo"):
        fake_model_kwargs = {"foo": "bar"}
        model.generate(input_ids, **fake_model_kwargs)
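# Added sketch of the pattern the generation tests above repeat: generate
# eagerly, generate again through jax.jit, and require identical sequences.
# The model class and checkpoint below are illustrative, not the ones used by
# the test mixin:
#
#   import jax
#   from transformers import AutoTokenizer, FlaxGPT2LMHeadModel
#
#   tokenizer = AutoTokenizer.from_pretrained("gpt2")
#   model = FlaxGPT2LMHeadModel.from_pretrained("gpt2")
#   model.config.do_sample = False
#   model.config.max_length = 16
#   input_ids = tokenizer("Hello world", return_tensors="np").input_ids
#   sequences = model.generate(input_ids).sequences
#   jit_generate = jax.jit(model.generate)
#   assert sequences.tolist() == jit_generate(input_ids).sequences.tolist()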
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}
class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self, vocab_size=512 + 1, n_positions=32 * 32, n_embd=512, n_layer=24, n_head=8,
        n_inner=None, activation_function="quick_gelu", resid_pdrop=0.1, embd_pdrop=0.1,
        attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02,
        scale_attn_weights=True, use_cache=True, tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
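# Hedged usage sketch of the ONNX config above (the processor class and
# checkpoint are illustrative; ImageGPT normally uses ImageGPTImageProcessor):
#
#   from transformers import AutoImageProcessor
#
#   config = ImageGPTConfig()
#   onnx_config = ImageGPTOnnxConfig(config)
#   processor = AutoImageProcessor.from_pretrained("openai/imagegpt-small")
#   dummy = onnx_config.generate_dummy_inputs(processor, batch_size=2, framework="pt")
#   # dummy["input_ids"] holds 2 color-clustered 32x32 images as token sequences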
'''simple docstring'''
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
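# Hedged usage sketch for one of the re-exported helpers above
# (DataCollatorWithPadding; the tokenizer checkpoint is illustrative):
#
#   from transformers import AutoTokenizer, DataCollatorWithPadding
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   collator = DataCollatorWithPadding(tokenizer, return_tensors="pt")
#   batch = collator([tokenizer("short"), tokenizer("a much longer example sentence")])
#   # batch["input_ids"] / batch["attention_mask"] are padded to the longest member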
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id"""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
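# Added demonstration of the metric helpers above (not part of the original module):
if __name__ == "__main__":
    # normalization strips case, articles and punctuation before comparing
    assert exact_match_score("The Cat sat.", "cat sat")
    print(calculate_exact_match(["cat sat", "dog ran"], ["the cat sat.", "a bird flew"]))  # {'em': 0.5}
    print(f1_score("green cat sat", "cat sat"))  # precision 2/3, recall 1.0 -> 0.8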
"""simple docstring"""
from math import pow, sqrt


def validate(*values: float) -> bool:
    """All inputs must be present and strictly positive."""
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float):
    """Graham's law: rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1)."""
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
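# Added numeric check of Graham's law as implemented above:
# rate_1 / rate_2 = sqrt(M_2 / M_1). For H2 (2.016 g/mol) vs O2 (31.998 g/mol):
if __name__ == "__main__":
    print(effusion_ratio(2.016, 31.998))  # ~3.98397: H2 effuses ~4x faster than O2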
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class TestGreedyKnapsack(unittest.TestCase):
    """Test cases for knapsack.greedy_knapsack."""

    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(ValueError, "The length of profit and weight must be same.")


if __name__ == "__main__":
    unittest.main()
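# Added illustrative re-implementation of the calc_profit under test above
# (a greedy fractional knapsack: take items by profit/weight ratio until
# max_weight is exhausted). The real knapsack.greedy_knapsack module may
# differ in details, so this is a sketch, not the tested code:
#
#   def calc_profit(profit, weight, max_weight):
#       if len(profit) != len(weight):
#           raise ValueError("The length of profit and weight must be same.")
#       if max_weight <= 0:
#           raise ValueError("max_weight must greater than zero.")
#       if any(p < 0 for p in profit):
#           raise ValueError("Profit can not be negative.")
#       if any(w < 0 for w in weight):
#           raise ValueError("Weight can not be negative.")
#       # take whole items in decreasing profit/weight order, then a fraction
#       order = sorted(range(len(profit)), key=lambda i: profit[i] / weight[i], reverse=True)
#       total, limit = 0.0, max_weight
#       for i in order:
#           if weight[i] <= limit:
#               limit -= weight[i]
#               total += profit[i]
#           else:
#               total += profit[i] * limit / weight[i]
#               break
#       return total
#
#   calc_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100)  # -> 210.0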
import numpy as np


def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
):
    """Return the dominant eigenvalue of input_matrix and its eigenvector."""
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find the Rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation: get eigenvalues and eigenvectors using built-in
        # numpy eigh (eigh is used for symmetric or hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy give close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
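# Added example of calling power_iteration directly (beyond the built-in test):
#
#   value, vec = power_iteration(np.array([[2.0, 1.0], [1.0, 2.0]]), np.array([1.0, 0.0]))
#   # value ~ 3.0 (dominant eigenvalue), vec ~ [0.7071, 0.7071]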
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase ( _lowerCamelCase ,unittest.TestCase ):
'''simple docstring'''
UpperCamelCase__ =CanineTokenizer
UpperCamelCase__ =False
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
super().setUp()
__magic_name__ : Optional[int] = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple:
return CanineTokenizer.from_pretrained('''google/canine-s''' )
def UpperCAmelCase__ ( self : Dict , **lowerCamelCase_ : Optional[int] ) -> CanineTokenizer:
__magic_name__ : int = self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
__magic_name__ : List[str] = 1024
return tokenizer
@require_torch
def UpperCAmelCase__ ( self : int ) -> int:
__magic_name__ : List[Any] = self.canine_tokenizer
__magic_name__ : Any = ['''Life is like a box of chocolates.''', '''You never know what you\'re gonna get.''']
# fmt: off
__magic_name__ : Optional[int] = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
# fmt: on
__magic_name__ : List[Any] = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors='''pt''' )
self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ )
__magic_name__ : Tuple = list(batch.input_ids.numpy()[0] )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[Any]:
__magic_name__ : Any = self.canine_tokenizer
__magic_name__ : Dict = ['''Once there was a man.''', '''He wrote a test in HuggingFace Transformers.''']
__magic_name__ : Dict = tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors='''pt''' )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn('''input_ids''' , lowerCamelCase_ )
self.assertIn('''attention_mask''' , lowerCamelCase_ )
self.assertIn('''token_type_ids''' , lowerCamelCase_ )
@require_torch
def UpperCAmelCase__ ( self : int ) -> Optional[int]:
__magic_name__ : int = self.canine_tokenizer
__magic_name__ : Dict = [
'''What\'s the weather?''',
'''It\'s about 25 degrees.''',
]
__magic_name__ : List[Any] = tokenizer(
text_target=lowerCamelCase_ , max_length=32 , padding='''max_length''' , truncation=lowerCamelCase_ , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict:
# safety check on max_len default value so we are sure the test works
__magic_name__ : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__magic_name__ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__magic_name__ : str = tempfile.mkdtemp()
__magic_name__ : Any = ''' He is very happy, UNwant\u00E9d,running'''
__magic_name__ : str = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
__magic_name__ : str = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
__magic_name__ : Tuple = after_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
shutil.rmtree(lowerCamelCase_ )
__magic_name__ : Any = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__magic_name__ : Any = tempfile.mkdtemp()
__magic_name__ : Any = ''' He is very happy, UNwant\u00E9d,running'''
__magic_name__ : str = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
__magic_name__ : str = chr(0XE007 )
additional_special_tokens.append(lowerCamelCase_ )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
__magic_name__ : Tuple = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
__magic_name__ : List[Any] = tokenizer.__class__.from_pretrained(lowerCamelCase_ )
__magic_name__ : Optional[int] = after_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertIn(lowerCamelCase_ , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__magic_name__ : List[Any] = tokenizer.__class__.from_pretrained(lowerCamelCase_ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowerCamelCase_ )
def UpperCAmelCase__ ( self : int ) -> Optional[int]:
__magic_name__ : Any = self.get_tokenizers(do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__magic_name__ , __magic_name__ : str = self.get_clean_sequence(lowerCamelCase_ )
# a special token for Canine can be defined as follows:
__magic_name__ : Optional[Any] = 0XE005
__magic_name__ : Optional[Any] = chr(lowerCamelCase_ )
tokenizer.add_special_tokens({'''cls_token''': special_token} )
__magic_name__ : Optional[int] = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertEqual(len(lowerCamelCase_ ) , 1 )
__magic_name__ : Union[str, Any] = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=lowerCamelCase_ )
__magic_name__ : str = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
__magic_name__ : Optional[Any] = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
__magic_name__ : Union[str, Any] = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , input_encoded + special_token_id )
__magic_name__ : Any = tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
self.assertTrue(special_token not in decoded )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
__magic_name__ : str = self.get_tokenizers(do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__magic_name__ : Tuple = chr(0XE005 )
__magic_name__ : Optional[int] = chr(0XE006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=lowerCamelCase_ )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({'''additional_special_tokens''': [SPECIAL_TOKEN_2]} )
__magic_name__ : List[str] = tokenizer.tokenize(lowerCamelCase_ )
__magic_name__ : Optional[int] = tokenizer.tokenize(lowerCamelCase_ )
self.assertEqual(len(lowerCamelCase_ ) , 1 )
self.assertEqual(len(lowerCamelCase_ ) , 1 )
self.assertEqual(token_a[0] , lowerCamelCase_ )
self.assertEqual(token_a[0] , lowerCamelCase_ )
@require_tokenizers
def UpperCAmelCase__ ( self : str ) -> List[str]:
__magic_name__ : List[Any] = self.get_tokenizers(do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
__magic_name__ : str = 0XE006
__magic_name__ : Optional[int] = chr(lowerCamelCase_ )
__magic_name__ : Optional[Any] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(lowerCamelCase_ )
tokenizer.from_pretrained(lowerCamelCase_ )
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
__magic_name__ : Union[str, Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
__magic_name__ : List[Any] = json.load(lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
__magic_name__ : int = json.load(lowerCamelCase_ )
# a special token for Canine can be defined as follows:
__magic_name__ : List[str] = 0XE006
__magic_name__ : List[str] = chr(lowerCamelCase_ )
__magic_name__ : int = [new_token_a]
__magic_name__ : str = [new_token_a]
with open(os.path.join(lowerCamelCase_ , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
with open(os.path.join(lowerCamelCase_ , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__magic_name__ : int = tokenizer_class.from_pretrained(lowerCamelCase_ , extra_ids=0 )
self.assertIn(lowerCamelCase_ , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
__magic_name__ : Optional[int] = 0XE007
__magic_name__ : List[str] = chr(lowerCamelCase_ )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__magic_name__ : List[str] = [AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ )]
__magic_name__ : str = tokenizer_class.from_pretrained(
lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , extra_ids=0 )
self.assertIn(lowerCamelCase_ , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def UpperCAmelCase__ ( self : Any ) -> List[str]:
__magic_name__ : Optional[int] = self.get_tokenizers(do_lower_case=lowerCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__magic_name__ : Union[str, Any] = '''hello world'''
if self.space_between_special_tokens:
__magic_name__ : List[Any] = '''[CLS] hello world [SEP]'''
else:
__magic_name__ : List[str] = input
__magic_name__ : Dict = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
__magic_name__ : Union[str, Any] = tokenizer.decode(lowerCamelCase_ , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(lowerCamelCase_ , [output, output.lower()] )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[Any]:
__magic_name__ : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__magic_name__ : str = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
__magic_name__ : Any = '''a'''
__magic_name__ : List[str] = ord(lowerCamelCase_ )
for attr in attributes_list:
setattr(lowerCamelCase_ , attr + '''_id''' , lowerCamelCase_ )
self.assertEqual(getattr(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(getattr(lowerCamelCase_ , attr + '''_id''' ) , lowerCamelCase_ )
setattr(lowerCamelCase_ , attr + '''_id''' , lowerCamelCase_ )
self.assertEqual(getattr(lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(getattr(lowerCamelCase_ , attr + '''_id''' ) , lowerCamelCase_ )
setattr(lowerCamelCase_ , '''additional_special_tokens_ids''' , [] )
self.assertListEqual(getattr(lowerCamelCase_ , '''additional_special_tokens''' ) , [] )
self.assertListEqual(getattr(lowerCamelCase_ , '''additional_special_tokens_ids''' ) , [] )
__magic_name__ : Any = 0XE006
__magic_name__ : str = chr(lowerCamelCase_ )
setattr(lowerCamelCase_ , '''additional_special_tokens_ids''' , [additional_special_token_id] )
self.assertListEqual(getattr(lowerCamelCase_ , '''additional_special_tokens''' ) , [additional_special_token] )
self.assertListEqual(getattr(lowerCamelCase_ , '''additional_special_tokens_ids''' ) , [additional_special_token_id] )
def UpperCAmelCase__ ( self : List[Any] ) -> Union[str, Any]:
pass
def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]:
pass
def UpperCAmelCase__ ( self : Tuple ) -> Any:
pass
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
pass
def UpperCAmelCase__ ( self : Union[str, Any] ) -> str:
pass
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
pass
def UpperCAmelCase__ ( self : List[str] ) -> Optional[Any]:
pass
def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]:
pass
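# Added sketch of what the CANINE round-trip tests above exercise: the
# tokenizer works at the Unicode codepoint level, so ids are ord() values
# bracketed by special codepoints in the private-use area (57344 = 0xE000 and
# 57345 = 0xE001 appear as the sequence delimiters in the expected ids above):
#
#   from transformers import CanineTokenizer
#
#   tokenizer = CanineTokenizer.from_pretrained("google/canine-s")
#   ids = tokenizer("hi").input_ids
#   # e.g. [57344, 104, 105, 57345] -> [CLS, ord("h"), ord("i"), SEP]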
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
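# Added note on shape_list above: for a tensor with fully static shape it
# simply returns Python ints, e.g.
#
#   shape_list(tf.zeros((2, 3)))  # -> [2, 3]
#
# while inside a tf.function with unknown dimensions, the None entries are
# replaced by the corresponding elements of tf.shape(tensor).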
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def lowercase__( __SCREAMING_SNAKE_CASE : Any ):
lowercase_ : Union[str, Any] = 3_84
lowercase_ : Union[str, Any] = 7
if "tiny" in model_name:
lowercase_ : int = 96
lowercase_ : List[Any] = (2, 2, 6, 2)
lowercase_ : Union[str, Any] = (3, 6, 12, 24)
elif "small" in model_name:
lowercase_ : Optional[int] = 96
lowercase_ : List[Any] = (2, 2, 18, 2)
lowercase_ : List[Any] = (3, 6, 12, 24)
elif "base" in model_name:
lowercase_ : Any = 1_28
lowercase_ : Tuple = (2, 2, 18, 2)
lowercase_ : Optional[int] = (4, 8, 16, 32)
lowercase_ : Union[str, Any] = 12
lowercase_ : Optional[int] = 5_12
elif "large" in model_name:
lowercase_ : Union[str, Any] = 1_92
lowercase_ : Any = (2, 2, 18, 2)
lowercase_ : int = (6, 12, 24, 48)
lowercase_ : Optional[Any] = 12
lowercase_ : Union[str, Any] = 7_68
# set label information
lowercase_ : Union[str, Any] = 1_50
lowercase_ : Union[str, Any] = 'huggingface/label-files'
lowercase_ : Any = 'ade20k-id2label.json'
lowercase_ : Optional[int] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
lowercase_ : Dict = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase_ : Any = {v: k for k, v in idalabel.items()}
lowercase_ : Any = SwinConfig(
embed_dim=__SCREAMING_SNAKE_CASE , depths=__SCREAMING_SNAKE_CASE , num_heads=__SCREAMING_SNAKE_CASE , window_size=__SCREAMING_SNAKE_CASE , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
lowercase_ : int = UperNetConfig(
backbone_config=__SCREAMING_SNAKE_CASE , auxiliary_in_channels=__SCREAMING_SNAKE_CASE , num_labels=__SCREAMING_SNAKE_CASE , idalabel=__SCREAMING_SNAKE_CASE , labelaid=__SCREAMING_SNAKE_CASE , )
return config
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
lowercase_ : Tuple = []
# fmt: off
# stem
rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm1.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm1.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm2.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm2.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.stages.{i}.downsample.reduction.weight''', F'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.stages.{i}.downsample.norm.weight''', F'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.downsample.norm.bias''', F'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
] )
# fmt: on
return rename_keys
def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ):
lowercase_ : str = dct.pop(__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = val
def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] ):
lowercase_ : List[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
lowercase_ : int = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
lowercase_ : Dict = state_dict.pop(F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
lowercase_ : List[Any] = state_dict.pop(F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowercase_ : Union[str, Any] = in_proj_weight[:dim, :]
lowercase_ : List[str] = in_proj_bias[: dim]
lowercase_ : int = in_proj_weight[
dim : dim * 2, :
]
lowercase_ : List[Any] = in_proj_bias[
dim : dim * 2
]
lowercase_ : Optional[Any] = in_proj_weight[
-dim :, :
]
lowercase_ : Optional[Any] = in_proj_bias[-dim :]
# fmt: on
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
lowercase_ , lowercase_ : List[Any] = x.shape
lowercase_ : str = x.reshape(__SCREAMING_SNAKE_CASE , 4 , in_channel // 4 )
lowercase_ : Tuple = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return x
def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] ):
lowercase_ , lowercase_ : List[str] = x.shape
lowercase_ : List[str] = x.reshape(__SCREAMING_SNAKE_CASE , in_channel // 4 , 4 )
lowercase_ : Optional[Any] = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return x
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
lowercase_ : List[str] = x.shape[0]
lowercase_ : List[str] = x.reshape(4 , in_channel // 4 )
lowercase_ : str = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(__SCREAMING_SNAKE_CASE )
return x
def lowercase__( __SCREAMING_SNAKE_CASE : str ):
lowercase_ : Tuple = x.shape[0]
lowercase_ : List[str] = x.reshape(in_channel // 4 , 4 )
lowercase_ : Tuple = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(__SCREAMING_SNAKE_CASE )
return x
def lowercase__( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] ):
lowercase_ : int = {
'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth',
'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth',
'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth',
'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth',
}
lowercase_ : List[Any] = model_name_to_url[model_name]
lowercase_ : Optional[Any] = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location='cpu' , file_name=__SCREAMING_SNAKE_CASE )[
'state_dict'
]
for name, param in state_dict.items():
print(__SCREAMING_SNAKE_CASE , param.shape )
lowercase_ : int = get_upernet_config(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = UperNetForSemanticSegmentation(__SCREAMING_SNAKE_CASE )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
lowercase_ : Any = state_dict.pop(__SCREAMING_SNAKE_CASE )
if "bn" in key:
lowercase_ : List[Any] = key.replace('bn' , 'batch_norm' )
lowercase_ : Optional[Any] = val
# rename keys
lowercase_ : Tuple = create_rename_keys(__SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
read_in_q_k_v(__SCREAMING_SNAKE_CASE , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
lowercase_ : List[str] = reverse_correct_unfold_reduction_order(__SCREAMING_SNAKE_CASE )
if "norm" in key:
lowercase_ : str = reverse_correct_unfold_norm_order(__SCREAMING_SNAKE_CASE )
model.load_state_dict(__SCREAMING_SNAKE_CASE )
# verify on image
lowercase_ : Optional[int] = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
lowercase_ : Union[str, Any] = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ).convert('RGB' )
lowercase_ : Any = SegformerImageProcessor()
lowercase_ : str = processor(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
with torch.no_grad():
lowercase_ : Any = model(__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = outputs.logits
print(logits.shape )
print('First values of logits:' , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
lowercase_ : Any = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
elif model_name == "upernet-swin-small":
lowercase_ : Tuple = torch.tensor(
[[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
elif model_name == "upernet-swin-base":
lowercase_ : Tuple = torch.tensor(
[[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
elif model_name == "upernet-swin-large":
lowercase_ : Tuple = torch.tensor(
[[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
print('Logits:' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(__SCREAMING_SNAKE_CASE )
if push_to_hub:
print(F'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(F'''openmmlab/{model_name}''' )
processor.push_to_hub(F'''openmmlab/{model_name}''' )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[F"upernet-swin-{size}" for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
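# Example invocation of the conversion script above (the script file name is
# illustrative; pass any of the four supported model sizes):
#
#   python convert_swin_upernet_to_pytorch.py \
#       --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny \
#       --push_to_hub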
"""simple docstring"""
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )
    img2img.save_pretrained(args.dump_path)
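# Hedged usage sketch of the converted image-variation pipeline (the image
# path is illustrative):
#
#   from PIL import Image
#   pipe = UnCLIPImageVariationPipeline.from_pretrained(args.dump_path)
#   variations = pipe(image=Image.open("input.png"), num_images_per_prompt=2).images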
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING


def _get_default_logging_level() -> int:
    """Return the default logging level, honoring the DATASETS_VERBOSITY env variable."""
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name; defaults to the library root logger."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info() -> None:
    return set_verbosity(INFO)


def set_verbosity_warning() -> None:
    return set_verbosity(WARNING)


def set_verbosity_debug() -> None:
    return set_verbosity(DEBUG)


def set_verbosity_error() -> None:
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()


class EmptyTqdm:
    """Dummy tqdm that does nothing when progress bars are disabled."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for any attribute access."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        # Dispatch to the real tqdm only when progress bars are globally enabled.
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar() -> None:
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar() -> None:
    global _tqdm_active
    _tqdm_active = False
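# Usage sketch (added): typical client-side calls, assuming this module is exposed as
# the datasets logging utilities (e.g. `datasets.utils.logging`):
#
#   from datasets.utils import logging as hf_logging
#
#   hf_logging.set_verbosity_info()
#   logger = hf_logging.get_logger(__name__)
#   logger.info("visible at INFO verbosity")
#
#   hf_logging.disable_progress_bar()  # hf_logging.tqdm(...) now yields no-op EmptyTqdm objects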
| 507
| 0
|
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 521
|
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Checks whether there might be something wrong with the given input for the model."""
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f"`args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records


@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be smaller than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
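# Usage sketch (added; the checkpoints named below are illustrative): these classes
# back the `pipeline()` factory for the corresponding task names.
#
#   from transformers import pipeline
#
#   generator = pipeline("text2text-generation", model="t5-small")
#   print(generator("translate English to German: Good morning."))
#
#   summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
#   print(summarizer("A very long article ...", min_length=5, max_length=20))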
| 521
| 1
|
"""Weighted scoring by percentual proximity: normalize every column of a numeric
dataset to [0, 1] and combine the per-column scores into one ranking value per row."""


def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose rows of raw data into per-column lists of floats."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Normalize each column to [0, 1]; weight 0 rewards low values, weight 1 rewards high values."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores for each row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Score each row and append its combined score to the row in place."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
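

if __name__ == "__main__":
    # Worked example (added): three candidates scored on (price, rating).
    # Weight 0 rewards low values (price), weight 1 rewards high values (rating).
    vehicles = [[20, 60], [25, 90], [30, 75]]
    print(procentual_proximity(vehicles, [0, 1]))
    # -> [[20, 60, 1.0], [25, 90, 1.5], [30, 75, 0.5]]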
| 665
|
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
| 665
| 1
|
"""simple docstring"""
def euclidean_distance_sqr(point1, point2) -> float:
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(points, column=0):
    return sorted(points, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute-force O(n^2) squared distance of the closest pair; used for small inputs."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Closest squared distance inside the strip around the dividing line; only the
    6 preceding points need to be checked for each point."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # cross_strip holds the points whose x-coordinates are within closest_pair_dis
    # of the dividing line
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts
        )
    ) ** 0.5
if __name__ == "__main__":
__snake_case : List[str] = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print('Distance:', closest_pair_of_points(points, len(points)))
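    # Sanity check (added): the nearest pair above is (2, 3) and (3, 4),
    # so the printed distance is sqrt(2) ~= 1.4142135623730951.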
| 293
|
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
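# Instantiation sketch (added; the hyperparameters below are illustrative rather than
# those of any released checkpoint, and the module is only importable inside the
# diffusers package because of the relative imports above):
#
#   encoder = SpectrogramNotesEncoder(
#       max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1,
#       num_layers=2, num_heads=12, d_kv=64, d_ff=2048,
#       feed_forward_proj="gated-gelu", is_decoder=False,
#   )
#   tokens = torch.randint(0, 1536, (1, 2048))
#   mask = torch.ones(1, 2048, dtype=torch.long)
#   hidden_states, out_mask = encoder(tokens, mask)  # hidden_states: (1, 2048, 768)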
| 293
| 1
|
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 61
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Prepare a list of PIL images, or numpy arrays if numpify=True, or PyTorch
        tensors if torchify=True."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs


@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )


@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (RGBA images are converted to RGB, hence 3 output channels)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 1
|
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a tokenized code snippet; snippets below MIN_NUM_TOKENS are skipped."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10_000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
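# Usage sketch (added; the dataset name is illustrative, any dataset exposing
# "content", "repo_name" and "path" columns works):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train")
#   ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
#   print(len(ds), "->", len(ds_dedup))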
| 428
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text2img_pipeline_fp16(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 1
|
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class A__ ( __snake_case ):
def __init__( self ):
'''simple docstring'''
UpperCamelCase : Any = []
def __UpperCamelCase( self , A_ , A_ , A_ , **A_ ):
'''simple docstring'''
self.events.append("on_init_end" )
def __UpperCamelCase( self , A_ , A_ , A_ , **A_ ):
'''simple docstring'''
self.events.append("on_train_begin" )
def __UpperCamelCase( self , A_ , A_ , A_ , **A_ ):
'''simple docstring'''
self.events.append("on_train_end" )
def __UpperCamelCase( self , A_ , A_ , A_ , **A_ ):
'''simple docstring'''
self.events.append("on_epoch_begin" )
def __UpperCamelCase( self , A_ , A_ , A_ , **A_ ):
'''simple docstring'''
self.events.append("on_epoch_end" )
def __UpperCamelCase( self , A_ , A_ , A_ , **A_ ):
'''simple docstring'''
self.events.append("on_step_begin" )
def __UpperCamelCase( self , A_ , A_ , A_ , **A_ ):
'''simple docstring'''
self.events.append("on_step_end" )
def __UpperCamelCase( self , A_ , A_ , A_ , **A_ ):
'''simple docstring'''
self.events.append("on_evaluate" )
def __UpperCamelCase( self , A_ , A_ , A_ , **A_ ):
'''simple docstring'''
self.events.append("on_predict" )
def __UpperCamelCase( self , A_ , A_ , A_ , **A_ ):
'''simple docstring'''
self.events.append("on_save" )
def __UpperCamelCase( self , A_ , A_ , A_ , **A_ ):
'''simple docstring'''
self.events.append("on_log" )
def __UpperCamelCase( self , A_ , A_ , A_ , **A_ ):
'''simple docstring'''
self.events.append("on_prediction_step" )
@require_torch
class A__ ( unittest.TestCase ):
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Any = tempfile.mkdtemp()
def __UpperCamelCase( self ):
'''simple docstring'''
shutil.rmtree(self.output_dir )
def __UpperCamelCase( self , A_=0 , A_=0 , A_=64 , A_=64 , A_=None , A_=False , **A_ ):
'''simple docstring'''
UpperCamelCase : Dict = RegressionDataset(length=A_ )
UpperCamelCase : int = RegressionDataset(length=A_ )
UpperCamelCase : List[Any] = RegressionModelConfig(a=A_ , b=A_ )
UpperCamelCase : Any = RegressionPreTrainedModel(A_ )
UpperCamelCase : Any = TrainingArguments(self.output_dir , disable_tqdm=A_ , report_to=[] , **A_ )
return Trainer(
A_ , A_ , train_dataset=A_ , eval_dataset=A_ , callbacks=A_ , )
def __UpperCamelCase( self , A_ , A_ ):
'''simple docstring'''
self.assertEqual(len(A_ ) , len(A_ ) )
# Order doesn't matter
UpperCamelCase : Optional[int] = sorted(A_ , key=lambda A_ : cb.__name__ if isinstance(A_ , A_ ) else cb.__class__.__name__ )
UpperCamelCase : Optional[Any] = sorted(A_ , key=lambda A_ : cb.__name__ if isinstance(A_ , A_ ) else cb.__class__.__name__ )
for cba, cba in zip(A_ , A_ ):
if isinstance(A_ , A_ ) and isinstance(A_ , A_ ):
self.assertEqual(A_ , A_ )
elif isinstance(A_ , A_ ) and not isinstance(A_ , A_ ):
self.assertEqual(A_ , cba.__class__ )
elif not isinstance(A_ , A_ ) and isinstance(A_ , A_ ):
self.assertEqual(cba.__class__ , A_ )
else:
self.assertEqual(A_ , A_ )
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : Any = ["on_init_end", "on_train_begin"]
UpperCamelCase : int = 0
UpperCamelCase : str = len(trainer.get_eval_dataloader() )
UpperCamelCase : Tuple = ["on_prediction_step"] * len(trainer.get_eval_dataloader() ) + ["on_log", "on_evaluate"]
for _ in range(trainer.state.num_train_epochs ):
expected_events.append("on_epoch_begin" )
for _ in range(A_ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("on_log" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("on_save" )
expected_events.append("on_epoch_end" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.get_trainer()
UpperCamelCase : Any = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ )
# Callbacks passed at init are added to the default callbacks
UpperCamelCase : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(A_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ )
        # TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
UpperCamelCase : List[str] = self.get_trainer(disable_tqdm=A_ )
UpperCamelCase : Union[str, Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
UpperCamelCase : Any = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(A_ )
expected_callbacks.remove(A_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ )
UpperCamelCase : List[Any] = self.get_trainer()
UpperCamelCase : Optional[int] = trainer.pop_callback(A_ )
self.assertEqual(cb.__class__ , A_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ )
trainer.add_callback(A_ )
expected_callbacks.insert(0 , A_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ )
# We can also add, pop, or remove by instance
UpperCamelCase : Dict = self.get_trainer()
UpperCamelCase : int = trainer.callback_handler.callbacks[0]
trainer.remove_callback(A_ )
expected_callbacks.remove(A_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ )
UpperCamelCase : List[Any] = self.get_trainer()
UpperCamelCase : Tuple = trainer.callback_handler.callbacks[0]
UpperCamelCase : Optional[Any] = trainer.pop_callback(A_ )
self.assertEqual(A_ , A_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ )
trainer.add_callback(A_ )
expected_callbacks.insert(0 , A_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)
UpperCamelCase : str = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
UpperCamelCase : List[str] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A_ , self.get_expected_events(A_ ) )
# Independent log/save/eval
UpperCamelCase : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
UpperCamelCase : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A_ , self.get_expected_events(A_ ) )
UpperCamelCase : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
UpperCamelCase : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A_ , self.get_expected_events(A_ ) )
UpperCamelCase : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="steps" )
trainer.train()
UpperCamelCase : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A_ , self.get_expected_events(A_ ) )
UpperCamelCase : Dict = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="epoch" )
trainer.train()
UpperCamelCase : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A_ , self.get_expected_events(A_ ) )
# A bit of everything
UpperCamelCase : Optional[Any] = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="steps" , )
trainer.train()
UpperCamelCase : List[Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A_ , self.get_expected_events(A_ ) )
# warning should be emitted for duplicated callbacks
with patch("transformers.trainer_callback.logger.warning" ) as warn_mock:
UpperCamelCase : Dict = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
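

# --- Illustrative sketch (added; not part of the original test suite) ---
# The tests above rely on an event-recording pattern: a TrainerCallback
# subclass appends the name of every hook the Trainer fires, and the test
# compares that sequence against the schedule implied by the TrainingArguments
# (logging_steps, save_steps, eval_steps, evaluation_strategy). A minimal
# standalone version of that pattern, assuming only the public
# `transformers.TrainerCallback` API:

from transformers import TrainerCallback


class RecordingCallback(TrainerCallback):
    """Appends the name of every received training event to `self.events`."""

    def __init__(self):
        self.events = []

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")


# Usage (with a hypothetical `trainer` built elsewhere):
#     recorder = RecordingCallback()
#     trainer.add_callback(recorder)
#     trainer.train()
#     assert "on_step_end" in recorder.events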
| 38
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
class A__ ( __snake_case ):
_UpperCAmelCase :Tuple = ['audio_values', 'audio_mask']
def __init__( self , A_=2048 , A_=1 , A_=[16, 16] , A_=128 , A_=4_4100 , A_=86 , A_=2048 , A_=0.0 , **A_ , ):
'''simple docstring'''
super().__init__(
feature_size=A_ , sampling_rate=A_ , padding_value=A_ , **A_ , )
UpperCamelCase : Optional[int] = spectrogram_length
UpperCamelCase : Dict = num_channels
UpperCamelCase : Optional[Any] = patch_size
UpperCamelCase : str = feature_size // self.patch_size[1]
UpperCamelCase : List[str] = n_fft
UpperCamelCase : int = sampling_rate // hop_length_to_sampling_rate
UpperCamelCase : Optional[int] = sampling_rate
UpperCamelCase : int = padding_value
UpperCamelCase : str = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=A_ , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=A_ , norm="slaney" , mel_scale="slaney" , ).T
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = spectrogram(
A_ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=80.0 , )
UpperCamelCase : List[Any] = log_spec[:, :-1]
UpperCamelCase : Optional[int] = log_spec - 20.0
UpperCamelCase : str = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self , A_ , A_ = None , A_ = True , A_ = None , A_ = False , A_ = False , **A_ , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"This feature extractor is set to support sampling rate"
F""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
F""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
UpperCamelCase : Optional[int] = isinstance(A_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
UpperCamelCase : Union[str, Any] = is_batched_numpy or (
isinstance(A_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCamelCase : int = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(A_ , np.ndarray ):
UpperCamelCase : str = np.asarray(A_ , dtype=np.floataa )
elif isinstance(A_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCamelCase : List[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCamelCase : Tuple = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
UpperCamelCase : str = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , A_ ):
UpperCamelCase : int = [np.asarray(A_ , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
UpperCamelCase : List[str] = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
UpperCamelCase : str = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
UpperCamelCase : Tuple = np.array(A_ ).astype(np.floataa )
# convert into correct format for padding
UpperCamelCase : Union[str, Any] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
UpperCamelCase : Any = np.ones([len(A_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
UpperCamelCase : List[str] = padded_audio_features * self.padding_value
for i in range(len(A_ ) ):
UpperCamelCase : Union[str, Any] = audio_features[i]
UpperCamelCase : Optional[int] = feature
# return as BatchFeature
if return_attention_mask:
UpperCamelCase : Optional[Any] = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
else:
UpperCamelCase : int = {"audio_values": padded_audio_features}
UpperCamelCase : Any = BatchFeature(data=A_ , tensor_type=A_ )
return encoded_inputs
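

# --- Illustrative check (added; a standalone sketch, not part of the extractor) ---
# The dB normalization used above maps a log-mel spectrogram into [-1, 1]:
# subtract 20 dB, divide by 40 dB, clip to [-2, 0], then shift by +1.
# A self-contained numpy check of that mapping:

_demo_db = np.array([-80.0, -20.0, 0.0, 20.0])  # spectrogram values in dB
_demo_norm = np.clip((_demo_db - 20.0) / 40.0, -2.0, 0.0) + 1.0
assert _demo_norm.min() >= -1.0 and _demo_norm.max() <= 1.0  # saturates at the ends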
| 38
| 1
|
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of ``func`` (an expression in ``x``, given as a string) via Newton-Raphson."""
    x = a
    while True:
        # x_{n+1} = x_n - f(x_n) / f'(x_n); sympy's diff supplies f'
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function (the value of pi)
    print(f'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
    # Find root of polynomial
    print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''')
    # Find root of log(x) - 1 = 0 (the value of e)
    print(f'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''')
    # Find root of exp(x) - 1 = 0
    print(f'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''')
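
    # --- Illustrative sanity check (added; not part of the original script) ---
    # Newton-Raphson should recover well-known roots; the positive root of
    # x**2 - 2 is sqrt(2):
    assert abs(newton_raphson("x**2 - 2", 1.5) - 2**0.5) < 10**-8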
| 131
|
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class A :
def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=False , snake_case_=False , snake_case_=False , snake_case_=2 , snake_case_=9_9 , snake_case_=0 , snake_case_=3_2 , snake_case_=5 , snake_case_=4 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_1_2 , snake_case_=2 , snake_case_=0.02 , snake_case_=2 , snake_case_=4 , snake_case_="last" , snake_case_=True , snake_case_=None , snake_case_=0 , ) -> Any:
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_input_lengths
_a = use_token_type_ids
_a = use_labels
_a = gelu_activation
_a = sinusoidal_embeddings
_a = causal
_a = asm
_a = n_langs
_a = vocab_size
_a = n_special
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_sequence_label_size
_a = initializer_range
_a = num_labels
_a = num_choices
_a = summary_type
_a = use_proj
_a = scope
_a = bos_token_id
def __lowerCAmelCase ( self ) -> Tuple:
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = random_attention_mask([self.batch_size, self.seq_length] )
_a = None
if self.use_input_lengths:
_a = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_a = None
_a = None
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a = ids_tensor([self.batch_size] , 2 ).float()
_a = ids_tensor([self.batch_size] , self.num_choices )
_a = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __lowerCAmelCase ( self ) -> str:
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> Optional[int]:
_a = XLMModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ , lengths=snake_case_ , langs=snake_case_ )
_a = model(snake_case_ , langs=snake_case_ )
_a = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> Union[str, Any]:
_a = XLMWithLMHeadModel(snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> str:
_a = XLMForQuestionAnsweringSimple(snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ )
_a = model(snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ )
_a = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> Optional[int]:
_a = XLMForQuestionAnswering(snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ )
_a = model(
snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , cls_index=snake_case_ , is_impossible=snake_case_ , p_mask=snake_case_ , )
_a = model(
snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , cls_index=snake_case_ , is_impossible=snake_case_ , )
((_a) , ) = result_with_labels.to_tuple()
_a = model(snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ )
((_a) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> Tuple:
_a = XLMForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ )
_a = model(snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> Union[str, Any]:
_a = self.num_labels
_a = XLMForTokenClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_a = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) -> str:
_a = self.num_choices
_a = XLMForMultipleChoice(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = self.prepare_config_and_inputs()
(
(
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) , (
_a
) ,
) = config_and_inputs
_a = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
return config, inputs_dict
@require_torch
class A ( a , a , a , unittest.TestCase ):
__UpperCAmelCase : str = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
__UpperCAmelCase : int = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
__UpperCAmelCase : List[Any] = (
{
"""feature-extraction""": XLMModel,
"""fill-mask""": XLMWithLMHeadModel,
"""question-answering""": XLMForQuestionAnsweringSimple,
"""text-classification""": XLMForSequenceClassification,
"""text-generation""": XLMWithLMHeadModel,
"""token-classification""": XLMForTokenClassification,
"""zero-shot""": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) -> Tuple:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_=False ) -> List[Any]:
_a = super()._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
_a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case_ )
_a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case_ )
return inputs_dict
def __lowerCAmelCase ( self ) -> Dict:
_a = XLMModelTester(self )
        _a = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
def __lowerCAmelCase ( self ) -> Dict:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*snake_case_ )
def __lowerCAmelCase ( self ) -> List[str]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*snake_case_ )
def __lowerCAmelCase ( self ) -> Dict:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*snake_case_ )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*snake_case_ )
def __lowerCAmelCase ( self ) -> List[str]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*snake_case_ )
def __lowerCAmelCase ( self ) -> List[Any]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*snake_case_ )
def __lowerCAmelCase ( self ) -> List[str]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*snake_case_ )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=False , snake_case_=1 ) -> Dict:
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertListEqual(
[isinstance(snake_case_ , snake_case_ ) for iter_attentions in attentions] , [True] * len(snake_case_ ) )
self.assertEqual(len(snake_case_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(snake_case_ ):
# adds PAD dummy token
_a = min_length + idx + 1
_a = min_length + idx + 1
_a = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(snake_case_ ) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=False , snake_case_=1 ) -> Dict:
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertListEqual(
[isinstance(snake_case_ , snake_case_ ) for iter_hidden_states in hidden_states] , [True] * len(snake_case_ ) , )
self.assertEqual(len(snake_case_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(snake_case_ ):
# adds PAD dummy token
_a = min_length + idx + 1
_a = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(snake_case_ ) , )
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = XLMModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@require_torch
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048" )
model.to(snake_case_ )
_a = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=snake_case_ ) # the president
_a = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
_a = model.generate(snake_case_ , do_sample=snake_case_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , snake_case_ )
| 131
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCamelCase :
'''simple docstring'''
def __init__( self , __a , __a=13 , __a=32 , __a=3 , __a=4 , __a=[10, 20, 30, 40] , __a=[2, 2, 3, 2] , __a=True , __a=True , __a=37 , __a="gelu" , __a=10 , __a=0.0_2 , __a=["stage2", "stage3", "stage4"] , __a=[2, 3, 4] , __a=None , ):
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = num_stages
__lowerCAmelCase = hidden_sizes
__lowerCAmelCase = depths
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = num_labels
__lowerCAmelCase = initializer_range
__lowerCAmelCase = out_features
__lowerCAmelCase = out_indices
__lowerCAmelCase = scope
def snake_case ( self ):
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def snake_case ( self ):
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def snake_case ( self , __a , __a , __a ):
__lowerCAmelCase = ConvNextModel(config=__a )
model.to(__a )
model.eval()
__lowerCAmelCase = model(__a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def snake_case ( self , __a , __a , __a ):
__lowerCAmelCase = ConvNextForImageClassification(__a )
model.to(__a )
model.eval()
__lowerCAmelCase = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self , __a , __a , __a ):
__lowerCAmelCase = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
__lowerCAmelCase = model(__a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__lowerCAmelCase = None
__lowerCAmelCase = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
__lowerCAmelCase = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def snake_case ( self ):
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Dict =(
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
__UpperCAmelCase : str =(
{"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification}
if is_torch_available()
else {}
)
__UpperCAmelCase : List[str] =True
__UpperCAmelCase : Union[str, Any] =False
__UpperCAmelCase : Tuple =False
__UpperCAmelCase : List[str] =False
__UpperCAmelCase : Tuple =False
def snake_case ( self ):
__lowerCAmelCase = ConvNextModelTester(self )
        __lowerCAmelCase = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)
def snake_case ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self ):
return
@unittest.skip(reason="ConvNext does not use inputs_embeds" )
def snake_case ( self ):
pass
@unittest.skip(reason="ConvNext does not support input and output embeddings" )
def snake_case ( self ):
pass
@unittest.skip(reason="ConvNext does not use feedforward chunking" )
def snake_case ( self ):
pass
def snake_case ( self ):
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(__a )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a )
def snake_case ( self ):
def check_hidden_states_output(__a , __a , __a ):
__lowerCAmelCase = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(__a , __a ) )
__lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowerCAmelCase = self.model_tester.num_stages
self.assertEqual(len(__a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase = True
check_hidden_states_output(__a , __a , __a )
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def snake_case ( self ):
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = ConvNextModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case ( self ):
return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224" ) if is_vision_available() else None
@slow
def snake_case ( self ):
__lowerCAmelCase = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224" ).to(__a )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=__a , return_tensors="pt" ).to(__a )
# forward pass
with torch.no_grad():
__lowerCAmelCase = model(**__a )
# verify the logits
__lowerCAmelCase = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __a )
__lowerCAmelCase = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
@require_torch
class _UpperCamelCase ( unittest.TestCase ,lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] =(ConvNextBackbone,) if is_torch_available() else ()
__UpperCAmelCase : Dict =ConvNextConfig
__UpperCAmelCase : List[str] =False
def snake_case ( self ):
__lowerCAmelCase = ConvNextModelTester(self )
| 282
|
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class _UpperCamelCase :
'''simple docstring'''
def __init__( self , __a , __a=13 , __a=10 , __a=3 , __a=2 , __a=2 , __a=True , __a=True , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=10 , __a=0.0_2 , __a="divided_space_time" , __a=None , ):
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = patch_size
__lowerCAmelCase = num_frames
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = attention_type
__lowerCAmelCase = initializer_range
__lowerCAmelCase = scope
__lowerCAmelCase = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
__lowerCAmelCase = (image_size // patch_size) ** 2
__lowerCAmelCase = (num_frames) * self.num_patches_per_frame + 1
def snake_case ( self ):
__lowerCAmelCase = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def snake_case ( self ):
__lowerCAmelCase = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
__lowerCAmelCase = self.num_labels
return config
def snake_case ( self , __a , __a , __a ):
__lowerCAmelCase = TimesformerModel(config=__a )
model.to(__a )
model.eval()
__lowerCAmelCase = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self , __a , __a , __a ):
__lowerCAmelCase = TimesformerForVideoClassification(__a )
model.to(__a )
model.eval()
__lowerCAmelCase = model(__a )
# verify the logits shape
__lowerCAmelCase = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , __a )
def snake_case ( self ):
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] =(TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
__UpperCAmelCase : Tuple =(
{"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
__UpperCAmelCase : Any =False
__UpperCAmelCase : Optional[int] =False
__UpperCAmelCase : Union[str, Any] =False
__UpperCAmelCase : Any =False
def snake_case ( self ):
__lowerCAmelCase = TimesformerModelTester(self )
        __lowerCAmelCase = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37)
def snake_case ( self , __a , __a , __a=False ):
__lowerCAmelCase = copy.deepcopy(__a )
if return_labels:
if model_class in get_values(__a ):
__lowerCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__a )
return inputs_dict
def snake_case ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="TimeSformer does not use inputs_embeds" )
def snake_case ( self ):
pass
def snake_case ( self ):
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def snake_case ( self ):
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(__a )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def snake_case ( self ):
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__a )
@slow
def snake_case ( self ):
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = TimesformerModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case ( self ):
if not self.has_attentions:
pass
else:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
for model_class in self.all_model_classes:
__lowerCAmelCase = self.model_tester.seq_length
__lowerCAmelCase = self.model_tester.num_frames
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = True
__lowerCAmelCase = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(__a , __a ) )
__lowerCAmelCase = outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowerCAmelCase = True
__lowerCAmelCase = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(__a , __a ) )
__lowerCAmelCase = outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
__lowerCAmelCase = len(__a )
# Check attention is always last and order is fine
__lowerCAmelCase = True
__lowerCAmelCase = True
__lowerCAmelCase = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + 1 , len(__a ) )
__lowerCAmelCase = outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def snake_case ( self ):
def check_hidden_states_output(__a , __a , __a ):
__lowerCAmelCase = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(__a , __a ) )
__lowerCAmelCase = outputs.hidden_states
__lowerCAmelCase = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__a ) , __a )
__lowerCAmelCase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase = True
check_hidden_states_output(__a , __a , __a )
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
__lowerCAmelCase = np.load(_UpperCamelCase )
return list(_UpperCamelCase )
@require_torch
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case ( self ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def snake_case ( self ):
__lowerCAmelCase = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to(
__a )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_video()
__lowerCAmelCase = image_processor(video[:8] , return_tensors="pt" ).to(__a )
# forward pass
with torch.no_grad():
__lowerCAmelCase = model(**__a )
# verify the logits
__lowerCAmelCase = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , __a )
__lowerCAmelCase = torch.tensor([-0.3_0_1_6, -0.7_7_1_3, -0.4_2_0_5] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
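

# --- Illustrative shape arithmetic (added; a standalone check, not part of the tests) ---
# With the tester defaults above (image_size=10, patch_size=2, num_frames=2),
# each frame yields (10 // 2) ** 2 = 25 patches, the token sequence is
# num_frames * 25 + 1 = 51 long, and every divided space-time attention map
# spans seq_len // num_frames + 1 = 26 tokens (one frame's patches + CLS):
_patches_per_frame = (10 // 2) ** 2
_seq_len = 2 * _patches_per_frame + 1
assert _seq_len // 2 + 1 == _patches_per_frame + 1 == 26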
| 282
| 1
|
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = """T5Config"""
class TFMTaModel(TFTaModel):
    model_type = "mt5"
    config_class = MTaConfig


class TFMTaForConditionalGeneration(TFTaForConditionalGeneration):
    model_type = "mt5"
    config_class = MTaConfig


class TFMTaEncoderModel(TFTaEncoderModel):
    model_type = "mt5"
    config_class = MTaConfig
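

# --- Illustrative usage (added; a sketch assuming the public mT5 checkpoints) ---
# These thin subclasses only swap in the mT5 configuration; usage mirrors T5:
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
#     model = TFMTaModel.from_pretrained("google/mt5-small")
#     inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="tf")
#     labels = tokenizer(text_target="Studies show that", return_tensors="tf")
#     outputs = model(input_ids=inputs.input_ids, decoder_input_ids=labels.input_ids)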
| 147
|
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
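

# --- Illustrative behavior (added; a sketch of the dummy-object pattern) ---
# Instantiating the placeholder raises a helpful ImportError (via
# `requires_backends`) instead of a bare NameError when the optional
# `note_seq` dependency is missing:
#
#     processor = MidiProcessor()  # -> ImportError asking to install note_seq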
| 147
| 1
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
snake_case_ : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
'''simple docstring'''
def __init__( self : Dict , __magic_name__ : WhisperForConditionalGeneration , __magic_name__ : WhisperProcessor , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , ) -> str:
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
speech_model=__magic_name__ , speech_processor=__magic_name__ , vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , feature_extractor=__magic_name__ , )
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> int:
if slice_size == "auto":
lowerCamelCase_ : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
self.enable_attention_slicing(__magic_name__ )
@torch.no_grad()
def __call__( self : Dict , __magic_name__ : int , __magic_name__ : Union[str, Any]=1_6000 , __magic_name__ : int = 512 , __magic_name__ : int = 512 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : List[str] , ) -> Any:
lowerCamelCase_ : Union[str, Any] = self.speech_processor.feature_extractor(
__magic_name__ , return_tensors="pt" , sampling_rate=__magic_name__ ).input_features.to(self.device )
lowerCamelCase_ : Tuple = self.speech_model.generate(__magic_name__ , max_length=48_0000 )
lowerCamelCase_ : Any = self.speech_processor.tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , normalize=__magic_name__ )[
0
]
if isinstance(__magic_name__ , __magic_name__ ):
lowerCamelCase_ : List[str] = 1
elif isinstance(__magic_name__ , __magic_name__ ):
lowerCamelCase_ : int = len(__magic_name__ )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(__magic_name__ )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__magic_name__ , __magic_name__ ) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(__magic_name__ )}." )
# get prompt text embeddings
lowerCamelCase_ : Dict = self.tokenizer(
__magic_name__ , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
lowerCamelCase_ : int = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCamelCase_ : Any = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
lowerCamelCase_ : List[str] = text_input_ids[:, : self.tokenizer.model_max_length]
lowerCamelCase_ : Optional[int] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Optional[int] = text_embeddings.shape
lowerCamelCase_ : Dict = text_embeddings.repeat(1 , __magic_name__ , 1 )
lowerCamelCase_ : Optional[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , __magic_name__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCamelCase_ : Any = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase_ : List[str]
if negative_prompt is None:
lowerCamelCase_ : Optional[int] = [""] * batch_size
elif type(__magic_name__ ) is not type(__magic_name__ ):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(__magic_name__ )} !="
F" {type(__magic_name__ )}." )
elif isinstance(__magic_name__ , __magic_name__ ):
lowerCamelCase_ : str = [negative_prompt]
elif batch_size != len(__magic_name__ ):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(__magic_name__ )}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`." )
else:
lowerCamelCase_ : List[Any] = negative_prompt
lowerCamelCase_ : Union[str, Any] = text_input_ids.shape[-1]
lowerCamelCase_ : List[str] = self.tokenizer(
__magic_name__ , padding="max_length" , max_length=__magic_name__ , truncation=__magic_name__ , return_tensors="pt" , )
lowerCamelCase_ : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCamelCase_ : Any = uncond_embeddings.shape[1]
lowerCamelCase_ : Dict = uncond_embeddings.repeat(1 , __magic_name__ , 1 )
lowerCamelCase_ : List[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , __magic_name__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase_ : Optional[Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCamelCase_ : int = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowerCamelCase_ : Union[str, Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowerCamelCase_ : Tuple = torch.randn(__magic_name__ , generator=__magic_name__ , device="cpu" , dtype=__magic_name__ ).to(
self.device )
else:
lowerCamelCase_ : str = torch.randn(__magic_name__ , generator=__magic_name__ , device=self.device , dtype=__magic_name__ )
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
lowerCamelCase_ : int = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__magic_name__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowerCamelCase_ : str = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase_ : Any = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase_ : Optional[int] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase_ : Tuple = {}
if accepts_eta:
lowerCamelCase_ : Dict = eta
for i, t in enumerate(self.progress_bar(__magic_name__ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase_ : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase_ : Optional[int] = self.scheduler.scale_model_input(__magic_name__ , __magic_name__ )
# predict the noise residual
lowerCamelCase_ : Any = self.unet(__magic_name__ , __magic_name__ , encoder_hidden_states=__magic_name__ ).sample
# perform guidance
if do_classifier_free_guidance:
lowerCamelCase_ , lowerCamelCase_ : str = noise_pred.chunk(2 )
lowerCamelCase_ : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase_ : List[str] = self.scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__magic_name__ , __magic_name__ , __magic_name__ )
lowerCamelCase_ : Dict = 1 / 0.1_8215 * latents
lowerCamelCase_ : Optional[Any] = self.vae.decode(__magic_name__ ).sample
lowerCamelCase_ : Tuple = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCamelCase_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCamelCase_ : List[Any] = self.numpy_to_pil(__magic_name__ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__magic_name__ , nsfw_content_detected=__magic_name__ )
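

# --- Illustrative usage (added; checkpoint and pipeline ids are assumptions) ---
# The pipeline transcribes speech with Whisper, then feeds the transcription
# to Stable Diffusion as the prompt. Loaded as a community pipeline, usage
# might look like:
#
#     import torch
#     from datasets import load_dataset
#     from diffusers import DiffusionPipeline
#     from transformers import WhisperForConditionalGeneration, WhisperProcessor
#
#     ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
#     audio = ds[3]["audio"]
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "runwayml/stable-diffusion-v1-5",
#         custom_pipeline="speech_to_image_diffusion",  # assumed community pipeline id
#         speech_model=WhisperForConditionalGeneration.from_pretrained("openai/whisper-small"),
#         speech_processor=WhisperProcessor.from_pretrained("openai/whisper-small"),
#         torch_dtype=torch.float16,
#     ).to("cuda")
#     image = pipe(audio["array"], sampling_rate=audio["sampling_rate"]).images[0]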
| 253
|
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """Launch `function` with `args`, using several processes when the environment allows it (TPU, multi-GPU)."""
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())
    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.")
    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`.")
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.")
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`.")
            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function.")
            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.01", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic.") from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)
def debug_launcher(function, args=(), num_processes=2):
    """Launch `function` on `num_processes` CPU processes, useful for debugging distributed setups."""
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
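# A minimal usage sketch for the two launchers above, assuming they run inside a notebook;
# `training_function` is a hypothetical stand-in for a real training loop, not part of this file:
#
# def training_function():
#     from accelerate import Accelerator
#     accelerator = Accelerator()  # must be created inside the launched function
#     ...
#
# notebook_launcher(training_function, args=(), num_processes=2)
# debug_launcher(training_function)  # runs the same function on 2 CPU processes for debugging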
| 253
| 1
|
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    """Return the simple interest accrued on `principal` over `days_between_payments` days."""
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments
def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    """Return the compound interest earned on `principal` over the given compounding periods."""
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )
def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    """Return the interest earned with daily compounding at the given annual percentage rate."""
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
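# A quick worked example for the functions above (sketched as comments so the doctest run
# above stays untouched):
#
# simple_interest(1000, 0.005, 10) == 50.0          # 1000 * 0.005 * 10
# abs(compound_interest(1000, 0.05, 3) - 157.625) < 1e-9   # 1000 * (1.05**3 - 1)
# apr_interest(1000, 0.05, 1) > 1000 * 0.05         # daily compounding beats simple interest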
| 387
|
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a polynomial warmup on top of a given learning rate decay schedule."""

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: float = None,
    adam_global_clipnorm: float = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    """Creates an optimizer with a learning rate schedule: a warmup phase followed by a polynomial decay."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay: the decay is applied to the variables directly,
    skipping any parameter whose name matches `exclude_from_weight_decay`."""

    def __init__(
        self,
        learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001,
        beta_1: float = 0.9,
        beta_2: float = 0.999,
        epsilon: float = 1e-7,
        amsgrad: bool = False,
        weight_decay_rate: float = 0.0,
        include_in_weight_decay: Optional[List[str]] = None,
        exclude_from_weight_decay: Optional[List[str]] = None,
        name: str = "AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with the `WarmUp` custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    """Accumulates gradients over several steps so one optimizer update covers several micro-batches."""

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
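# A minimal usage sketch for the pieces above (step counts and the model loop are hypothetical,
# shown as comments): the optimizer warms the LR up, then decays it polynomially, and the
# accumulator sums gradients across micro-batches before one optimizer step.
#
# optimizer, lr_schedule = create_optimizer(
#     init_lr=5e-5, num_train_steps=1000, num_warmup_steps=100, weight_decay_rate=0.01
# )
# accumulator = GradientAccumulator()
# for micro_batch_grads in ...:            # gradients from a tf.GradientTape
#     accumulator(micro_batch_grads)
# optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
# accumulator.reset()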
| 387
| 1
|
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    """Sorts `arr` by repeatedly extracting increasing sublists (strands) and merging them.
    Note that `arr` is consumed in place."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
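# A short trace of the algorithm above on [4, 3, 5, 1, 2], sketched by hand:
# strand 1 pulls the increasing run [4, 5] out of the input, leaving [3, 1, 2];
# strand 2 pulls [3] and merges it into the solution, giving [3, 4, 5];
# strand 3 pulls [1, 2] and merges, giving the final [1, 2, 3, 4, 5].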
| 107
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
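# A minimal sketch of what the `_LazyModule` indirection above buys (illustrative only,
# assuming the transformers-style lazy init): attribute access, not the import itself,
# triggers loading the heavy submodule, so importing the package stays cheap.
#
# import transformers.models.maskformer as maskformer   # fast: nothing heavy imported yet
# model_cls = maskformer.MaskFormerModel                 # first attribute access imports
#                                                        # modeling_maskformer (and torch)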
| 107
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_xlm_roberta""": [
"""XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaConfig""",
"""XLMRobertaOnnxConfig""",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""XLMRobertaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""XLMRobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
"""XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaForCausalLM""",
"""XLMRobertaForMaskedLM""",
"""XLMRobertaForMultipleChoice""",
"""XLMRobertaForQuestionAnswering""",
"""XLMRobertaForSequenceClassification""",
"""XLMRobertaForTokenClassification""",
"""XLMRobertaModel""",
"""XLMRobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
"""TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMRobertaForCausalLM""",
"""TFXLMRobertaForMaskedLM""",
"""TFXLMRobertaForMultipleChoice""",
"""TFXLMRobertaForQuestionAnswering""",
"""TFXLMRobertaForSequenceClassification""",
"""TFXLMRobertaForTokenClassification""",
"""TFXLMRobertaModel""",
"""TFXLMRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
"""FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxXLMRobertaForMaskedLM""",
"""FlaxXLMRobertaForCausalLM""",
"""FlaxXLMRobertaForMultipleChoice""",
"""FlaxXLMRobertaForQuestionAnswering""",
"""FlaxXLMRobertaForSequenceClassification""",
"""FlaxXLMRobertaForTokenClassification""",
"""FlaxXLMRobertaModel""",
"""FlaxXLMRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 178
|
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''' , [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
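# A minimal round-trip sketch of the behavior these tests cover, using the public
# `datasets` API instead of the reader/writer classes directly (the file path is
# illustrative):
#
# from datasets import Dataset
# ds = Dataset.from_dict({"col_1": ["a", "b", "c", "d"], "col_2": [1, 2, 3, 4]})
# ds.to_parquet("data.parquet")                    # backed by ParquetDatasetWriter
# reloaded = Dataset.from_parquet("data.parquet")  # backed by ParquetDatasetReader
# assert reloaded.column_names == ds.column_names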
| 678
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 700
|
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class MockResponse:
    """Mocked `requests` response that always returns the fixed CONTENT payload."""

    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]
def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)
    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            downloaded_path_content = downloaded_path.read_text()
            assert downloaded_path_content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
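# A minimal sketch of the DownloadManager flow these tests exercise (the URL is
# illustrative, not a real dataset file):
#
# from datasets.download.download_manager import DownloadManager
# dl_manager = DownloadManager()
# local_path = dl_manager.download("https://example.com/file1.txt")  # cached under downloads/
# extracted = dl_manager.extract(local_path)  # no-op for plain text, unpacks archives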
| 220
| 0
|
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState
class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Denoising diffusion probabilistic models (DDPM) scheduler, in Flax."""

    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )

    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        return sample

    def set_timesteps(
        self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )

    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                "or `v_prediction` for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
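# A minimal usage sketch for the scheduler above (shapes, step counts, and the
# `model_output`/`sample` placeholders are illustrative; a real loop would get
# `model_output` from a UNet):
#
# scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
# state = scheduler.create_state()
# state = scheduler.set_timesteps(state, num_inference_steps=50)
# for t in state.timesteps:
#     out = scheduler.step(state, model_output, int(t), sample, key=jax.random.PRNGKey(0))
#     sample, state = out.prev_sample, out.state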
| 304
|
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
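# A hypothetical invocation of the conversion script above (the script filename and
# both paths are illustrative, not from the source):
#
#   python convert_xglm_checkpoint.py /path/to/model.pt ./xglm-converted
#
# The first positional argument is the fairseq `model.pt`, the second the output
# folder written by `save_pretrained`.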
| 304
| 1
|
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Measure the first qubit of a fresh circuit on a simulator backend."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
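# With one qubit and one classical bit, the circuit above measures the |0> ground state
# with no gates applied, so the histogram concentrates entirely on "0". A sketch of a
# typical run:
#
#   single_qubit_measure(1, 1)  ->  {'0': 1000}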
| 707
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
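# A sketch of the special-token layout produced by `build_inputs_with_special_tokens`
# above, following the RoBERTa/CamemBERT convention the method encodes:
#
#   single sequence:   <s> A </s>
#   pair of sequences: <s> A </s></s> B </s>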
| 672
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)
IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}
class ImageGPTConfig(PretrainedConfig):
    """Configuration class storing the configuration of an ImageGPT model."""

    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
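# A minimal sketch of using the configuration classes above (the values quoted are the
# defaults from the __init__ signature):
#
# config = ImageGPTConfig()            # vocab_size=513, n_positions=1024 (32*32 pixels)
# onnx_config = ImageGPTOnnxConfig(config)
# list(onnx_config.inputs)             # -> ["input_ids"]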
| 6
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 465
| 0
|
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowerCAmelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**_SCREAMING_SNAKE_CASE ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase = np.array([0.65_863, 0.59_425, 0.49_326, 0.56_313, 0.53_875, 0.56_627, 0.51_065, 0.39_777, 0.46_330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowerCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**_SCREAMING_SNAKE_CASE ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowerCAmelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**_SCREAMING_SNAKE_CASE ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowerCAmelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**_SCREAMING_SNAKE_CASE ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase = np.array([0.53_817, 0.60_812, 0.47_384, 0.49_530, 0.51_894, 0.49_814, 0.47_984, 0.38_958, 0.44_271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = pipe(**_SCREAMING_SNAKE_CASE ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
lowerCAmelCase = np.array([0.53_895, 0.60_808, 0.47_933, 0.49_608, 0.51_886, 0.49_950, 0.48_053, 0.38_957, 0.44_200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.get_dummy_inputs()
        inputs['prompt'] = 3 * [inputs['prompt']]
# forward
lowerCAmelCase = pipe(**_SCREAMING_SNAKE_CASE )
        image_slice_1 = output.images[0, -3:, -3:, -1]
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = 3 * [inputs.pop('prompt' )]
lowerCAmelCase = pipe.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=pipe.tokenizer.model_max_length , truncation=_SCREAMING_SNAKE_CASE , return_tensors='np' , )
lowerCAmelCase = text_inputs['input_ids']
        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0]
        inputs['prompt_embeds'] = prompt_embeds
# forward
lowerCAmelCase = pipe(**_SCREAMING_SNAKE_CASE )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1e-4
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.get_dummy_inputs()
        negative_prompt = 3 * ['this is a negative prompt']
        inputs['negative_prompt'] = negative_prompt
        inputs['prompt'] = 3 * [inputs['prompt']]
# forward
lowerCAmelCase = pipe(**_SCREAMING_SNAKE_CASE )
        image_slice_1 = output.images[0, -3:, -3:, -1]
lowerCAmelCase = self.get_dummy_inputs()
lowerCAmelCase = 3 * [inputs.pop('prompt' )]
lowerCAmelCase = []
for p in [prompt, negative_prompt]:
lowerCAmelCase = pipe.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=pipe.tokenizer.model_max_length , truncation=_SCREAMING_SNAKE_CASE , return_tensors='np' , )
lowerCAmelCase = text_inputs['input_ids']
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0] )
        inputs['prompt_embeds'] , inputs['negative_prompt_embeds'] = embeds
# forward
lowerCAmelCase = pipe(**_SCREAMING_SNAKE_CASE )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = ort.SessionOptions()
lowerCAmelCase = False
return options
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = 'A painting of a squirrel eating a burger'
np.random.seed(0 )
lowerCAmelCase = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type='np' )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase = np.array([0.0_452, 0.0_390, 0.0_087, 0.0_350, 0.0_617, 0.0_364, 0.0_544, 0.0_523, 0.0_720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = DDIMScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
lowerCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = 'open neural network exchange'
lowerCAmelCase = np.random.RandomState(0 )
lowerCAmelCase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_SCREAMING_SNAKE_CASE , output_type='np' )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase = np.array([0.2_867, 0.1_974, 0.1_481, 0.7_294, 0.7_251, 0.6_667, 0.4_194, 0.5_642, 0.6_486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
lowerCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = 'open neural network exchange'
lowerCAmelCase = np.random.RandomState(0 )
lowerCAmelCase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_SCREAMING_SNAKE_CASE , output_type='np' )
lowerCAmelCase = output.images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase = np.array([0.2_306, 0.1_959, 0.1_593, 0.6_549, 0.6_394, 0.5_408, 0.5_065, 0.6_010, 0.6_161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = 0
def test_callback_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> None:
lowerCAmelCase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
lowerCAmelCase = latents[0, -3:, -3:, -1]
lowerCAmelCase = np.array(
[-0.6_772, -0.3_835, -1.2_456, 0.1_905, -1.0_974, 0.6_967, -1.9_353, 0.0_178, 1.0_167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
lowerCAmelCase = latents[0, -3:, -3:, -1]
lowerCAmelCase = np.array(
[-0.3_351, 0.2_241, -0.1_837, -0.2_325, -0.6_577, 0.3_393, -0.0_241, 0.5_899, 1.3_875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
lowerCAmelCase = False
lowerCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = 'Andromeda galaxy in a bottle'
lowerCAmelCase = np.random.RandomState(0 )
pipe(
prompt=_SCREAMING_SNAKE_CASE , num_inference_steps=5 , guidance_scale=7.5 , generator=_SCREAMING_SNAKE_CASE , callback=_SCREAMING_SNAKE_CASE , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert pipe.safety_checker is None
lowerCAmelCase = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowerCAmelCase = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
| 700
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _snake_case ( metaclass=a_ ):
SCREAMING_SNAKE_CASE : List[str] = ['''torch''', '''torchsde''']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(self , ['torch', 'torchsde'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls , ['torch', 'torchsde'] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(cls , ['torch', 'torchsde'] )
| 514
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class _lowerCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
snake_case_ = "nat"
snake_case_ = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : List[Any] , __snake_case : Optional[Any]=4 , __snake_case : Union[str, Any]=3 , __snake_case : Union[str, Any]=64 , __snake_case : Dict=[3, 4, 6, 5] , __snake_case : Optional[int]=[2, 4, 8, 16] , __snake_case : str=7 , __snake_case : Dict=3.0 , __snake_case : Optional[Any]=True , __snake_case : Optional[int]=0.0 , __snake_case : Optional[int]=0.0 , __snake_case : Tuple=0.1 , __snake_case : Optional[Any]="gelu" , __snake_case : List[str]=0.02 , __snake_case : Tuple=1e-5 , __snake_case : Dict=0.0 , __snake_case : Optional[int]=None , __snake_case : List[str]=None , **__snake_case : Optional[int] , )-> Optional[Any]:
super().__init__(**__lowerCamelCase )
snake_case = patch_size
snake_case = num_channels
snake_case = embed_dim
snake_case = depths
snake_case = len(__lowerCamelCase )
snake_case = num_heads
snake_case = kernel_size
snake_case = mlp_ratio
snake_case = qkv_bias
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = drop_path_rate
snake_case = hidden_act
snake_case = layer_norm_eps
snake_case = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case = int(embed_dim * 2 ** (len(__lowerCamelCase ) - 1) )
snake_case = layer_scale_init_value
snake_case = ['''stem'''] + [f'''stage{idx}''' for idx in range(1 , len(__lowerCamelCase ) + 1 )]
snake_case = get_aligned_output_features_output_indices(
out_features=__lowerCamelCase , out_indices=__lowerCamelCase , stage_names=self.stage_names )
| 369
|
def method_a(boundary: list, steps: float) -> float:
    """Composite (extended) trapezoidal rule."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a: float, b: float, h: float):
    """Yield the interior sample points of the interval."""
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x: float) -> float:  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f"""y = {y}""")


if __name__ == "__main__":
    main()
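# Added note: method_a above is the composite trapezoidal rule,
#     integral ~= h * (f(a) / 2 + f(x_1) + ... + f(x_(n-1)) + f(b) / 2),
# so with f(x) = x**2 on [0, 1] and 10 steps main() prints a value close to
# 0.335, within O(h**2) of the exact integral 1/3.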
| 647
| 0
|
MOD_ADLER = 65_521


def adler32(plain_text: str) -> int:
    """simple docstring"""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
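# Added usage sketch: Adler-32 of the string "Wikipedia" is the canonical
# test value 0x11E60398.
if __name__ == "__main__":
    print(hex(adler32("Wikipedia")))  # 0x11e60398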
| 715
|
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """simple docstring"""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    """simple docstring"""
    perfect_partitions = 0
    total_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(integer)
        integer += 1


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 436
| 0
|
"""simple docstring"""
def different_signs(num_a: int, num_b: int) -> bool:
    '''simple docstring'''
    return num_a ^ num_b < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
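# Added usage sketch: the XOR of two ints is negative exactly when their sign
# bits differ, so the function flags opposite-signed pairs.
assert different_signs(1, -1) is True
assert different_signs(72, 6) is False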
| 65
|
'''simple docstring'''
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class A ( UpperCAmelCase , UpperCAmelCase ):
a_ = 1
@register_to_config
def __init__( self : Tuple , __a : int = 1_0_0_0 , __a : Optional[Union[np.ndarray, List[float]]] = None ) -> int:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(__a )
# standard deviation of the initial noise distribution
__UpperCAmelCase = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
__UpperCAmelCase = 4
# running values
__UpperCAmelCase = []
def snake_case__ ( self : str , __a : int , __a : Union[str, torch.device] = None ) -> str:
__UpperCAmelCase = num_inference_steps
__UpperCAmelCase = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
__UpperCAmelCase = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
            __UpperCAmelCase = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
else:
__UpperCAmelCase = torch.sin(steps * math.pi / 2 ) ** 2
__UpperCAmelCase = (1.0 - self.betas**2) ** 0.5
        __UpperCAmelCase = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
__UpperCAmelCase = timesteps.to(__a )
__UpperCAmelCase = []
def snake_case__ ( self : List[Any] , __a : torch.FloatTensor , __a : int , __a : torch.FloatTensor , __a : bool = True , ) -> Union[SchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
__UpperCAmelCase = (self.timesteps == timestep).nonzero().item()
__UpperCAmelCase = timestep_index + 1
__UpperCAmelCase = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(__a )
if len(self.ets ) == 1:
__UpperCAmelCase = self.ets[-1]
elif len(self.ets ) == 2:
__UpperCAmelCase = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
__UpperCAmelCase = (2_3 * self.ets[-1] - 1_6 * self.ets[-2] + 5 * self.ets[-3]) / 1_2
else:
__UpperCAmelCase = (1 / 2_4) * (5_5 * self.ets[-1] - 5_9 * self.ets[-2] + 3_7 * self.ets[-3] - 9 * self.ets[-4])
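        # Added note: the branches above apply the standard 1st- to 4th-order
        # Adams-Bashforth linear multistep coefficients to the running history
        # of model outputs stored in self.ets.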
__UpperCAmelCase = self._get_prev_sample(__a , __a , __a , __a )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__a )
def snake_case__ ( self : List[Any] , __a : torch.FloatTensor , *__a : Any , **__a : Optional[int] ) -> torch.FloatTensor:
return sample
def snake_case__ ( self : int , __a : List[Any] , __a : Any , __a : int , __a : Union[str, Any] ) -> Optional[Any]:
__UpperCAmelCase = self.alphas[timestep_index]
__UpperCAmelCase = self.betas[timestep_index]
__UpperCAmelCase = self.alphas[prev_timestep_index]
__UpperCAmelCase = self.betas[prev_timestep_index]
__UpperCAmelCase = (sample - sigma * ets) / max(__a , 1e-8 )
__UpperCAmelCase = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : Dict ) -> str:
return self.config.num_train_timesteps
| 262
| 0
|
UNIT_SYMBOL = {
'''meter''': '''m''',
'''kilometer''': '''km''',
'''megametre''': '''Mm''',
'''gigametre''': '''Gm''',
'''terametre''': '''Tm''',
'''petametre''': '''Pm''',
'''exametre''': '''Em''',
'''zettametre''': '''Zm''',
'''yottametre''': '''Ym''',
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
'''m''': 0,
'''km''': 3,
'''Mm''': 6,
'''Gm''': 9,
'''Tm''': 12,
'''Pm''': 15,
'''Em''': 18,
'''Zm''': 21,
'''Ym''': 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    '''simple docstring'''
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg_error = (
            F"""Invalid 'from_type' value: {from_type!r}.\n"""
            F"""Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"""
        )
        raise ValueError(msg_error)
    if to_sanitized not in METRIC_CONVERSION:
        msg_error = (
            F"""Invalid 'to_type' value: {to_type!r}.\n"""
            F"""Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"""
        )
        raise ValueError(msg_error)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(1_0 , exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
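# Added usage sketch (with the names restored above):
#   length_conversion(4, "kilometer", "meter")   -> 4000.0
#   length_conversion(1, "meter", "kilometer")   -> 0.001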
| 702
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class snake_case_ (lowercase__ ):
"""simple docstring"""
_lowerCamelCase = """ClapFeatureExtractor"""
_lowerCamelCase = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self ,lowercase ,lowercase):
"""simple docstring"""
super().__init__(lowercase ,lowercase)
def __call__( self ,lowercase=None ,lowercase=None ,lowercase=None ,**lowercase):
"""simple docstring"""
UpperCAmelCase_ : Dict = kwargs.pop("sampling_rate" ,lowercase)
if text is None and audios is None:
raise ValueError("You have to specify either text or audios. Both cannot be none.")
if text is not None:
UpperCAmelCase_ : List[str] = self.tokenizer(lowercase ,return_tensors=lowercase ,**lowercase)
if audios is not None:
UpperCAmelCase_ : str = self.feature_extractor(
lowercase ,sampling_rate=lowercase ,return_tensors=lowercase ,**lowercase)
if text is not None and audios is not None:
UpperCAmelCase_ : Optional[int] = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase) ,tensor_type=lowercase)
def A_ ( self ,*lowercase ,**lowercase):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowercase ,**lowercase)
def A_ ( self ,*lowercase ,**lowercase):
"""simple docstring"""
return self.tokenizer.decode(*lowercase ,**lowercase)
@property
def A_ ( self):
"""simple docstring"""
UpperCAmelCase_ : str = self.tokenizer.model_input_names
UpperCAmelCase_ : str = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
| 455
| 0
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __lowerCAmelCase ( __snake_case ):
def __init__( self :Dict , *__magic_name__ :Dict , __magic_name__ :Union[str, Any]=None , __magic_name__ :str=None , **__magic_name__ :Optional[int] ):
'''simple docstring'''
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
a = eval_examples
a = post_process_function
def lowerCamelCase__ ( self :Any , __magic_name__ :int=None , __magic_name__ :int=None , __magic_name__ :Tuple=None , __magic_name__ :str = "eval" ):
'''simple docstring'''
a = self.eval_dataset if eval_dataset is None else eval_dataset
a = self.get_eval_dataloader(__lowerCamelCase )
a = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
a = self.compute_metrics
a = None
a = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
a = time.time()
try:
a = eval_loop(
__lowerCamelCase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCamelCase , metric_key_prefix=__lowerCamelCase , )
finally:
a = compute_metrics
a = self.args.eval_batch_size * self.args.world_size
if F'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
__lowerCamelCase , __lowerCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
a = self.post_process_function(__lowerCamelCase , __lowerCamelCase , output.predictions )
a = self.compute_metrics(__lowerCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'{metric_key_prefix}_' ):
a = metrics.pop(__lowerCamelCase )
metrics.update(output.metrics )
else:
a = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(__lowerCamelCase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
a = self.callback_handler.on_evaluate(self.args , self.state , self.control , __lowerCamelCase )
return metrics
def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :Union[str, Any] , __magic_name__ :Any , __magic_name__ :Dict=None , __magic_name__ :str = "test" ):
'''simple docstring'''
a = self.get_test_dataloader(__lowerCamelCase )
# Temporarily disable metric computation, we will do it in the loop here.
a = self.compute_metrics
a = None
a = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
a = time.time()
try:
a = eval_loop(
__lowerCamelCase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__lowerCamelCase , metric_key_prefix=__lowerCamelCase , )
finally:
a = compute_metrics
a = self.args.eval_batch_size * self.args.world_size
if F'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
__lowerCamelCase , __lowerCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
a = self.post_process_function(__lowerCamelCase , __lowerCamelCase , output.predictions , """predict""" )
a = self.compute_metrics(__lowerCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'{metric_key_prefix}_' ):
a = metrics.pop(__lowerCamelCase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__lowerCamelCase )
| 468
|
def count_inversions_bf(arr):
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [1_0, 2, 1, 5, 5, 2, 1_1]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)
    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
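# Added note: count_inversions_bf is the O(n**2) brute force, while
# count_inversions_recursive counts the same inversions in O(n log n) via
# merge sort - whenever an element of the sorted right half q is merged
# before the remaining elements of the sorted left half p, len(p) - i cross
# inversions are added at once.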
| 377
| 0
|
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
_snake_case = logging.get_logger(__name__)
_snake_case = '''T5Config'''
def shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    '''simple docstring'''
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
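# Added sketch: shift_tokens_right(jnp.array([[5, 6, 7]]), 0, 0) yields
# [[0, 5, 6]] - each sequence is shifted one position to the right, the
# decoder start token is prepended, and any -100 label padding is mapped
# back to pad_token_id.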
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : List[Any] = "mt5"
__A : Dict = MTaConfig
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : Optional[Any] = "mt5"
__A : List[str] = MTaConfig
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : str = "mt5"
__A : Union[str, Any] = MTaConfig
| 231
|
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def lowercase_( SCREAMING_SNAKE_CASE_ = "isbn/0140328726" ):
'''simple docstring'''
lowerCamelCase : List[Any] = olid.strip().strip("/" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("/" ) != 1:
lowerCamelCase : Tuple = f"""{olid} is not a valid Open Library olid"""
raise ValueError(SCREAMING_SNAKE_CASE_ )
return requests.get(f"""https://openlibrary.org/{new_olid}.json""" ).json()
def summarize_book(ol_book_data: dict) -> dict:
    '''simple docstring'''
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
            book_summary = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
print('''\n'''.join(f'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'''Sorry, there are no results for ISBN: {isbn}.''')
| 231
| 1
|
'''simple docstring'''
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    """simple docstring"""
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    """simple docstring"""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    """simple docstring"""
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    """simple docstring"""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    """simple docstring"""
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    """simple docstring"""
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    """simple docstring"""
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input('''Enter numbers separated by a comma : ''').strip()
    unsorted = [float(item) for item in user_input.split(''',''')]
    print(sort(unsorted))
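# Added note: sort() above is introsort - quicksort around a median-of-3
# pivot that falls back to heap_sort once the depth budget
# 2 * ceil(log2(n)) is spent, and to insertion_sort for slices shorter than
# size_threshold = 16.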
| 399
|
'''simple docstring'''
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """simple docstring"""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
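# Added note: both prints rely on Fermat's little theorem - for prime p and
# b not divisible by p, b**(p - 2) is the modular inverse of b, so
# (a / b) % p equals (a * b**(p - 2)) % p. The float division on the left is
# only exact here because b happens to divide a.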
| 399
| 1
|
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
__A : str = logging.getLogger()
def _dump_articles(path: Path, articles: list):
    content = '''\n'''.join(articles)
    Path(path).open('''w''').writelines(content)
__A : Tuple = '''patrickvonplaten/t5-tiny-random'''
__A : Dict = '''sshleifer/bart-tiny-random'''
__A : Dict = '''sshleifer/tiny-mbart'''
__A : Union[str, Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class _UpperCAmelCase ( _A ):
def A ( self : List[Any] , A : str ) -> Any:
lowercase_ : List[str] = Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source'''
lowercase_ : str = input_file_name.parent / '''utest_output.txt'''
assert not output_file_name.exists()
lowercase_ : List[Any] = [''' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.''']
_dump_articles(A , A )
lowercase_ : Union[str, Any] = str(Path(self.get_auto_remove_tmp_dir() ) / '''scores.json''' )
lowercase_ : Tuple = '''translation_en_to_de''' if model == T5_TINY else '''summarization'''
lowercase_ : Union[str, Any] = F'''
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
'''.split()
with patch.object(A , '''argv''' , A ):
run_generate()
assert Path(A ).exists()
# os.remove(Path(output_file_name))
def A ( self : Union[str, Any] ) -> List[str]:
self.run_eval_tester(A )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def A ( self : str , A : str ) -> Dict:
self.run_eval_tester(A )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def A ( self : Union[str, Any] , A : Optional[Any] ) -> Optional[Any]:
lowercase_ : List[Any] = Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source'''
lowercase_ : Optional[int] = input_file_name.parent / '''utest_output.txt'''
assert not output_file_name.exists()
lowercase_ : List[Any] = {
'''en''': ['''Machine learning is great, isn\'t it?''', '''I like to eat bananas''', '''Tomorrow is another great day!'''],
'''de''': [
'''Maschinelles Lernen ist großartig, oder?''',
'''Ich esse gerne Bananen''',
'''Morgen ist wieder ein toller Tag!''',
],
}
lowercase_ : str = Path(self.get_auto_remove_tmp_dir() )
lowercase_ : List[str] = str(tmp_dir / '''scores.json''' )
lowercase_ : Optional[Any] = str(tmp_dir / '''val.target''' )
_dump_articles(A , text['''en'''] )
_dump_articles(A , text['''de'''] )
lowercase_ : Union[str, Any] = '''translation_en_to_de''' if model == T5_TINY else '''summarization'''
lowercase_ : int = F'''
run_eval_search.py
{model}
{str(A )}
{str(A )}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
'''.split()
testargs.extend(['''--search''', '''num_beams=1:2 length_penalty=0.9:1.0'''] )
with patch.object(A , '''argv''' , A ):
with CaptureStdout() as cs:
run_search()
lowercase_ : Dict = [''' num_beams | length_penalty''', model, '''Best score args''']
lowercase_ : List[str] = ['''Info''']
if "translation" in task:
expected_strings.append('''bleu''' )
else:
expected_strings.extend(A )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(A ).exists()
os.remove(Path(A ) )
| 141
|
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class _UpperCAmelCase ( _A ):
SCREAMING_SNAKE_CASE_ : Any = ["image_processor", "tokenizer"]
SCREAMING_SNAKE_CASE_ : str = "OwlViTImageProcessor"
SCREAMING_SNAKE_CASE_ : Optional[int] = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : str , A : str=None , A : List[Any]=None , **A : Union[str, Any] ) -> Tuple:
lowercase_ : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , A , )
lowercase_ : List[Any] = kwargs.pop('''feature_extractor''' )
lowercase_ : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(A , A )
def __call__( self : List[Any] , A : List[Any]=None , A : Any=None , A : List[str]=None , A : int="max_length" , A : Optional[Any]="np" , **A : Tuple ) -> Optional[Any]:
if text is None and query_images is None and images is None:
raise ValueError(
'''You have to specify at least one text or query image or image. All three cannot be none.''' )
if text is not None:
if isinstance(A , A ) or (isinstance(A , A ) and not isinstance(text[0] , A )):
lowercase_ : Any = [self.tokenizer(A , padding=A , return_tensors=A , **A )]
elif isinstance(A , A ) and isinstance(text[0] , A ):
lowercase_ : int = []
# Maximum number of queries across batch
lowercase_ : str = max([len(A ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(A ) != max_num_queries:
lowercase_ : Union[str, Any] = t + [''' '''] * (max_num_queries - len(A ))
lowercase_ : List[Any] = self.tokenizer(A , padding=A , return_tensors=A , **A )
encodings.append(A )
else:
raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' )
if return_tensors == "np":
lowercase_ : Optional[Any] = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
lowercase_ : List[Any] = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
lowercase_ : Tuple = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
lowercase_ : str = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
lowercase_ : Any = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0 )
lowercase_ : Optional[Any] = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
lowercase_ : Union[str, Any] = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
lowercase_ : str = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
else:
raise ValueError('''Target return tensor type could not be returned''' )
lowercase_ : Tuple = BatchEncoding()
lowercase_ : int = input_ids
lowercase_ : Optional[Any] = attention_mask
if query_images is not None:
lowercase_ : Optional[Any] = BatchEncoding()
lowercase_ : Union[str, Any] = self.image_processor(
A , return_tensors=A , **A ).pixel_values
lowercase_ : Union[str, Any] = query_pixel_values
if images is not None:
lowercase_ : Union[str, Any] = self.image_processor(A , return_tensors=A , **A )
if text is not None and images is not None:
lowercase_ : List[Any] = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
lowercase_ : Any = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**A ) , tensor_type=A )
def A ( self : List[str] , *A : int , **A : Dict ) -> Optional[int]:
return self.image_processor.post_process(*A , **A )
def A ( self : Tuple , *A : str , **A : List[str] ) -> Dict:
return self.image_processor.post_process_object_detection(*A , **A )
def A ( self : Union[str, Any] , *A : List[str] , **A : str ) -> Any:
return self.image_processor.post_process_image_guided_detection(*A , **A )
def A ( self : List[Any] , *A : Any , **A : Any ) -> List[str]:
return self.tokenizer.batch_decode(*A , **A )
def A ( self : List[Any] , *A : List[Any] , **A : int ) -> Union[str, Any]:
return self.tokenizer.decode(*A , **A )
@property
def A ( self : Optional[int] ) -> Tuple:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , A , )
return self.image_processor_class
@property
def A ( self : List[Any] ) -> List[Any]:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , A , )
return self.image_processor
| 141
| 1
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
a = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
a = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
a = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim
a = torch.tensor(
[[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
a = model(__lowerCamelCase )['''last_hidden_state'''].detach()
self.assertEqual(output.shape ,__lowerCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] ,__lowerCamelCase ,atol=1e-3 ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
a = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' )
a = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
a = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim
a = torch.tensor(
[[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
a = model(__lowerCamelCase )['''last_hidden_state'''].detach()
self.assertEqual(output.shape ,__lowerCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] ,__lowerCamelCase ,atol=1e-3 ) )
| 387
|
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """simple docstring"""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """simple docstring"""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
UpperCamelCase__ : Union[str, Any] = (0.0, 0.0, 5.0, 9.3, 7.0)
UpperCamelCase__ : int = 1_0.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
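# Added note: horner evaluates the polynomial with one multiply-add per
# coefficient by factoring c0 + c1*x + c2*x**2 + ... as
# c0 + x*(c1 + x*(c2 + ...)). For poly = (0.0, 0.0, 5.0, 9.3, 7.0) and
# x = 10.0 both functions print 79800.0.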
| 387
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class __SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ):
snake_case : Union[str, Any] = 'markuplm'
def __init__( self , __lowerCAmelCase=30522 , __lowerCAmelCase=768 , __lowerCAmelCase=12 , __lowerCAmelCase=12 , __lowerCAmelCase=3072 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=512 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=0 , __lowerCAmelCase=0 , __lowerCAmelCase=2 , __lowerCAmelCase=256 , __lowerCAmelCase=1024 , __lowerCAmelCase=216 , __lowerCAmelCase=1001 , __lowerCAmelCase=32 , __lowerCAmelCase=50 , __lowerCAmelCase="absolute" , __lowerCAmelCase=True , __lowerCAmelCase=None , **__lowerCAmelCase , ):
super().__init__(
pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase , )
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = hidden_act
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = position_embedding_type
UpperCamelCase__ = use_cache
UpperCamelCase__ = classifier_dropout
# additional properties
UpperCamelCase__ = max_depth
UpperCamelCase__ = max_xpath_tag_unit_embeddings
UpperCamelCase__ = max_xpath_subs_unit_embeddings
UpperCamelCase__ = tag_pad_id
UpperCamelCase__ = subs_pad_id
UpperCamelCase__ = xpath_unit_hidden_size
| 714
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
"studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class __SCREAMING_SNAKE_CASE ( _a ):
snake_case : Optional[int] = """luke"""
def __init__( self , __lowerCAmelCase=50267 , __lowerCAmelCase=500000 , __lowerCAmelCase=768 , __lowerCAmelCase=256 , __lowerCAmelCase=12 , __lowerCAmelCase=12 , __lowerCAmelCase=3072 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=512 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=1 , __lowerCAmelCase=0 , __lowerCAmelCase=2 , **__lowerCAmelCase , ):
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
UpperCamelCase__ = vocab_size
UpperCamelCase__ = entity_vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = entity_emb_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = hidden_act
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = use_entity_aware_attention
UpperCamelCase__ = classifier_dropout
| 548
| 0
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=_lowercase )
class lowerCamelCase_ ( _lowercase ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
_lowercase : str = field(default='''question-answering-extractive''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
_lowercase : ClassVar[Features] = Features({'''question''': Value('''string''' ), '''context''': Value('''string''' )} )
_lowercase : ClassVar[Features] = Features(
{
'''answers''': Sequence(
{
'''text''': Value('''string''' ),
'''answer_start''': Value('''int32''' ),
} )
} )
_lowercase : str = "question"
_lowercase : str = "context"
_lowercase : str = "answers"
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 17
|
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("""Input value must be an 'int' type""")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
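# Added usage sketch: 25 is 0b11001, so its highest set bit sits in position
# 5 and get_highest_set_bit_position(25) returns 5; for 0 the loop never
# runs and the function returns 0.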
| 17
| 1
|
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """simple docstring"""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """simple docstring"""
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
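# Added usage sketch (with the names restored above):
#   sigmoid(np.array([0.0]))             -> array([0.5])
#   sigmoid_linear_unit(np.array([0.0])) -> array([0.])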
| 719
|
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()


@unittest.skip('Temporarily disable the doc tests.')
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ) -> None:
        """Runs through the given directory, doctesting every file matching the identifier."""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(Path("..") / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| 147
| 0
|
def combination_util(arr, n, r, index, data, i):
    # Prints one combination once `data` is full (index == r).
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
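    # Cross-check (added for illustration): the recursion enumerates the same
    # C(5, 3) = 10 subsets that itertools.combinations produces.
    from itertools import combinations

    assert len(list(combinations(arr, 3))) == 10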
| 181
|
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 181
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
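# Usage sketch (added for illustration; the checkpoint name comes from the map
# above):
#   tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
#   tokenizer("Un exemple en français.")["input_ids"]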
| 441
|
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sorts `sequence` in place between indices `start` and `end` (inclusive)."""
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
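    # Usage sketch (added for illustration): slowsort sorts in place.
    data = [5, 3, 1, 4, 2]
    slowsort(data)
    print(data)  # [1, 2, 3, 4, 5]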
| 441
| 1
|
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 6_50, """eval_accuracy""": 0.6, """eval_loss""": 0.9},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 6_00, """eval_accuracy""": 0.3, """eval_loss""": 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 526
|
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
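# Minimal usage sketch (added for illustration; the checkpoint id is an
# assumption, any BridgeTower checkpoint with a processor config would do):
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   inputs = processor(images=image, text="a photo of a cat", return_tensors="pt")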
| 526
| 1
|
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
lowercase__ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowercase__ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def lowercase__ ( self : Union[str, Any], lowerCamelCase : int, lowerCamelCase : List[str], lowerCamelCase : Dict ):
'''simple docstring'''
lowercase__ = TextaTextGenerationPipeline(model=lowerCamelCase, tokenizer=lowerCamelCase )
return generator, ["Something to write", "Something else"]
def lowercase__ ( self : int, lowerCamelCase : Tuple, lowerCamelCase : Optional[int] ):
'''simple docstring'''
lowercase__ = generator('''Something there''' )
self.assertEqual(lowerCamelCase, [{'''generated_text''': ANY(lowerCamelCase )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]['''generated_text'''].startswith('''Something there''' ) )
lowercase__ = generator(['''This is great !''', '''Something else'''], num_return_sequences=2, do_sample=lowerCamelCase )
self.assertEqual(
lowerCamelCase, [
[{'''generated_text''': ANY(lowerCamelCase )}, {'''generated_text''': ANY(lowerCamelCase )}],
[{'''generated_text''': ANY(lowerCamelCase )}, {'''generated_text''': ANY(lowerCamelCase )}],
], )
lowercase__ = generator(
['''This is great !''', '''Something else'''], num_return_sequences=2, batch_size=2, do_sample=lowerCamelCase )
self.assertEqual(
lowerCamelCase, [
[{'''generated_text''': ANY(lowerCamelCase )}, {'''generated_text''': ANY(lowerCamelCase )}],
[{'''generated_text''': ANY(lowerCamelCase )}, {'''generated_text''': ANY(lowerCamelCase )}],
], )
with self.assertRaises(lowerCamelCase ):
generator(4 )
@require_torch
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = pipeline('''text2text-generation''', model='''patrickvonplaten/t5-tiny-random''', framework='''pt''' )
# do_sample=False necessary for reproducibility
lowercase__ = generator('''Something there''', do_sample=lowerCamelCase )
self.assertEqual(lowerCamelCase, [{'''generated_text''': ''''''}] )
lowercase__ = 3
lowercase__ = generator(
'''Something there''', num_return_sequences=lowerCamelCase, num_beams=lowerCamelCase, )
lowercase__ = [
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': '''Beide Beide Beide Beide Beide Beide Beide Beide'''},
{'''generated_text''': ''''''},
]
self.assertEqual(lowerCamelCase, lowerCamelCase )
lowercase__ = generator('''This is a test''', do_sample=lowerCamelCase, num_return_sequences=2, return_tensors=lowerCamelCase )
self.assertEqual(
lowerCamelCase, [
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
], )
lowercase__ = generator.model.config.eos_token_id
lowercase__ = '''<pad>'''
lowercase__ = generator(
['''This is a test''', '''This is a second test'''], do_sample=lowerCamelCase, num_return_sequences=2, batch_size=2, return_tensors=lowerCamelCase, )
self.assertEqual(
lowerCamelCase, [
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
[
{'''generated_token_ids''': ANY(torch.Tensor )},
{'''generated_token_ids''': ANY(torch.Tensor )},
],
], )
@require_tf
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = pipeline('''text2text-generation''', model='''patrickvonplaten/t5-tiny-random''', framework='''tf''' )
# do_sample=False necessary for reproducibility
lowercase__ = generator('''Something there''', do_sample=lowerCamelCase )
self.assertEqual(lowerCamelCase, [{'''generated_text''': ''''''}] )
| 716
|
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Splits `code` into blocks of the given `indent_level`, optionally between two prompts."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wraps a key function so that sorting ignores casing and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sorts a list of objects: constants first, then classes, then functions."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Sorts the imports inside a single (possibly multi-line) import statement."""

    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """Sorts the `_import_structure` of a given init file, or checks it is sorted."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    """Sorts the `_import_structure` in every __init__.py under `PATH_TO_TRANSFORMERS`."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
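    # Typical invocations (added note; the file name is an assumption about
    # where this script lives in the repo):
    #   python utils/custom_init_isort.py --check_only   # fail if an __init__.py is unsorted
    #   python utils/custom_init_isort.py                # rewrite the inits in place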
| 671
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = '▁'
a_ = {'vocab_file': 'sentencepiece.bpe.model'}
a_ = {
'vocab_file': {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
}
}
a_ = {
'facebook/xglm-564M': 2_048,
}
class _lowercase ( snake_case_ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['input_ids', 'attention_mask']
def __init__( self : str , snake_case : Any , snake_case : int="<s>" , snake_case : List[Any]="</s>" , snake_case : Optional[int]="</s>" , snake_case : Tuple="<s>" , snake_case : Union[str, Any]="<unk>" , snake_case : List[Any]="<pad>" , snake_case : Optional[Dict[str, Any]] = None , **snake_case : List[str] , ) -> None:
"""simple docstring"""
UpperCamelCase_ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
UpperCamelCase_ : str = 7
UpperCamelCase_ : str = [f"<madeupword{i}>" for i in range(self.num_madeup_words )]
UpperCamelCase_ : List[Any] = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , sep_token=snake_case , cls_token=snake_case , pad_token=snake_case , sp_model_kwargs=self.sp_model_kwargs , **snake_case , )
UpperCamelCase_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case ) )
UpperCamelCase_ : List[str] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCamelCase_ : Any = 1
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCamelCase_ : List[Any] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
UpperCamelCase_ : Optional[Any] = len(self.sp_model )
UpperCamelCase_ : Any = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(snake_case )
UpperCamelCase_ : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : List[str] ) -> int:
"""simple docstring"""
UpperCamelCase_ : List[Any] = self.__dict__.copy()
UpperCamelCase_ : str = None
UpperCamelCase_ : List[str] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : str , snake_case : Tuple ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCamelCase_ : Dict = {}
UpperCamelCase_ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case : List[int] , snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
UpperCamelCase_ : Union[str, Any] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case : List[int] , snake_case : Optional[List[int]] = None , snake_case : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case )
if token_ids_a is None:
return [1] + ([0] * len(snake_case ))
return [1] + ([0] * len(snake_case )) + [1, 1] + ([0] * len(snake_case ))
def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case : List[int] , snake_case : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
UpperCamelCase_ : List[str] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : int = {self.convert_ids_to_tokens(snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(snake_case , out_type=snake_case )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case : Tuple ) -> Union[str, Any]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase_ : Optional[int] = self.sp_model.PieceToId(snake_case )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case : List[Any] ) -> int:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case : int ) -> str:
"""simple docstring"""
UpperCamelCase_ : int = ''.join(snake_case ).replace(snake_case , ' ' ).strip()
return out_string
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case : str , snake_case : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(snake_case ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCamelCase_ : Dict = os.path.join(
snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case , 'wb' ) as fi:
UpperCamelCase_ : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(snake_case )
return (out_vocab_file,)
| 417
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
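# Usage note (added for illustration): because of the lazy structure above,
# `from transformers.models.convnext import ConvNextConfig` resolves without
# importing torch or TensorFlow; the heavy modules load only when a
# backend-specific class such as ConvNextModel is first accessed.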
| 417
| 1
|
import os
from datetime import datetime as dt
from github import Github
_SCREAMING_SNAKE_CASE = [
'''good first issue''',
'''feature request''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
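# Example usage (added note): run with a token that can edit issues, e.g.
#   GITHUB_TOKEN=<token> python stale.py
# Repos typically run a script like this from a daily scheduled CI job.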
| 704
|
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
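# Example usage from a shell (added note): with diffusers installed, the
# `diffusers-cli` entry point dispatches to the parser above, e.g.
#   diffusers-cli env    # print environment info for bug reports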
| 56
| 0
|
"""simple docstring"""
def dodecahedron_surface_area(edge: float) -> float:
    # Surface area of a regular dodecahedron: 3 * sqrt(25 + 10*sqrt(5)) * edge^2.
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    # Volume of a regular dodecahedron: (15 + 7*sqrt(5)) / 4 * edge^3.
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
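    # Worked example (added for illustration), edge = 2:
    #   surface area = 3 * sqrt(25 + 10*sqrt(5)) * 4 ≈ 82.58
    #   volume       = (15 + 7*sqrt(5)) / 4 * 8      ≈ 61.30
    print(dodecahedron_surface_area(2))
    print(dodecahedron_volume(2))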
| 223
|
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , """tf_padding""" ) )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , """depth_multiplier""" ) )
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=0.25 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=10_24 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__="relu6" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=None , ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = parent
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : Dict = num_channels
SCREAMING_SNAKE_CASE__ : str = image_size
SCREAMING_SNAKE_CASE__ : Any = depth_multiplier
SCREAMING_SNAKE_CASE__ : int = min_depth
SCREAMING_SNAKE_CASE__ : Any = tf_padding
SCREAMING_SNAKE_CASE__ : int = int(last_hidden_size * depth_multiplier )
SCREAMING_SNAKE_CASE__ : Any = output_stride
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE__ : int = classifier_dropout_prob
SCREAMING_SNAKE_CASE__ : str = use_labels
SCREAMING_SNAKE_CASE__ : Any = is_training
SCREAMING_SNAKE_CASE__ : Dict = num_labels
SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : Tuple = scope
def __magic_name__ (self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : List[Any] = None
SCREAMING_SNAKE_CASE__ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ : int = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = MobileNetVaModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE__ : int = MobileNetVaForImageClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = config_and_inputs
SCREAMING_SNAKE_CASE__ : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ (a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : List[Any] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
__UpperCamelCase : Dict = (
{'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
__UpperCamelCase : Tuple = False
__UpperCamelCase : Optional[int] = False
__UpperCamelCase : Optional[Any] = False
__UpperCamelCase : Tuple = False
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = MobileNetVaModelTester(self )
SCREAMING_SNAKE_CASE__ : Optional[Any] = MobileNetVaConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""" )
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""" )
def __magic_name__ (self ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileNetV1 does not output attentions""" )
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
pass
def __magic_name__ (self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Dict = model_class(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : Optional[int] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
def check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : str = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[str] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE__ : List[str] = outputs.hidden_states
SCREAMING_SNAKE_CASE__ : Tuple = 26
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : int = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ )
@slow
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : str = MobileNetVaModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def __magic_name__ (self ) -> str:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""" ) if is_vision_available() else None
)
@slow
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""" ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = self.default_image_processor
SCREAMING_SNAKE_CASE__ : int = prepare_img()
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
SCREAMING_SNAKE_CASE__ : List[Any] = torch.Size((1, 10_01) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
| 223
| 1
|
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
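# Example usage from a shell (added note): `accelerate config` walks through
# the prompts defined above; `accelerate config --config_file ./my_config.yaml`
# writes the answers to an explicit path instead of the default cache location.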
| 721
|
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : List[Any] = '''▁'''
_lowerCamelCase : Optional[int] = {'''vocab_file''': '''prophetnet.tokenizer'''}
_lowerCamelCase : str = {
'''vocab_file''': {
'''microsoft/xprophetnet-large-wiki100-cased''': (
'''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'''
),
}
}
_lowerCamelCase : Optional[Any] = {
'''microsoft/xprophetnet-large-wiki100-cased''': {'''do_lower_case''': False},
}
_lowerCamelCase : Optional[Any] = {
'''microsoft/xprophetnet-large-wiki100-cased''': 5_12,
}
def __lowerCamelCase (UpperCAmelCase__ : Optional[Any] ):
SCREAMING_SNAKE_CASE = collections.OrderedDict()
with open(UpperCAmelCase__ , "r" , encoding="utf-8" ) as reader:
SCREAMING_SNAKE_CASE = reader.readlines()
for index, token in enumerate(UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE = token.rstrip("\n" )
SCREAMING_SNAKE_CASE = index
return vocab
class lowercase ( a ):
lowercase__ : Optional[int] = VOCAB_FILES_NAMES
lowercase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Any = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : str="[SEP]" , _UpperCamelCase : str="[SEP]" , _UpperCamelCase : Dict="[SEP]" , _UpperCamelCase : Tuple="[UNK]" , _UpperCamelCase : Dict="[PAD]" , _UpperCamelCase : Any="[CLS]" , _UpperCamelCase : Optional[Any]="[MASK]" , _UpperCamelCase : Optional[Dict[str, Any]] = None , **_UpperCamelCase : Dict , ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , sep_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
SCREAMING_SNAKE_CASE = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
for i in range(10 ):
SCREAMING_SNAKE_CASE = F"[unused{i}]"
SCREAMING_SNAKE_CASE = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE = 12
SCREAMING_SNAKE_CASE = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(_UpperCamelCase )
def __getstate__( self : Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.__dict__.copy()
SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self : List[Any] , _UpperCamelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
" pip install sentencepiece" )
raise
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (sub-word strings) to a single string."""
        out_string = "".join(tokens).replace("▁", " ").strip()  # "▁" is the sentencepiece underline
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
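# Illustrative sketch of the fairseq/spm offset used above (values taken from the
# alignment comment in __init__: spm id 3 is "," and it must land at embedding index 15,
# hence the fixed offset of 12). Names below are hypothetical, for illustration only.
FAIRSEQ_OFFSET = 12

def _spm_id_to_fairseq_id(spm_id: int, unk_token_id: int = 3) -> int:
    # spm id 0 is <unk>; every other piece shifts up past the reserved/[unused] slots
    return spm_id + FAIRSEQ_OFFSET if spm_id else unk_token_id

assert _spm_id_to_fairseq_id(3) == 15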
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename a flax parameter key/tensor pair to its PyTorch equivalent."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor
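# Quick sanity sketch for the renaming rule above (shapes are made up for illustration):
# a 3-D expert kernel (n_experts, d_in, d_out) becomes a torch-style "weight" with the
# last two axes swapped, while 2-D kernels are simply transposed.
_key, _w = rename_base_flax_keys(("mlp", "wi", "kernel"), torch.zeros(8, 4, 16))
assert _key == ("mlp", "wi", "weight") and _w.shape == (8, 16, 4)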
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
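# Illustrative follow-up (assumed layout): the `index` returned above mirrors the
# `pytorch_model.bin.index.json` format, so a parameter's shard can be looked up by name.
# This helper is hypothetical, not part of the conversion script.
def find_shard_for(index: dict, parameter_name: str) -> str:
    # `weight_map` maps parameter names to shard filenames such as "...-00001-of-00072.bin"
    return index["weight_map"][parameter_name]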
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out
    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
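# Two small numeric sketches for the module above (random/made-up shapes, illustration only).
# (1) The projected path in `_compute_logit` equals the commented-out einsum form:
_h, _p, _w = torch.randn(2, 8), torch.randn(8, 4), torch.randn(10, 4)
assert torch.allclose(
    nn.functional.linear(nn.functional.linear(_h, _p.t().contiguous()), _w),
    torch.einsum("bd,de,ev->bv", _h, _p, _w.t()),
    atol=1e-5,
)
# (2) The tail composition implements log p(w) = log p(cluster) + log p(w | cluster):
_head = torch.log(torch.tensor([0.7, 0.3]))  # [shortlist word, cluster]
_tail = torch.log(torch.tensor([0.4, 0.6]))  # words inside the cluster
assert torch.allclose((_head[1] + _tail).exp().sum(), torch.tensor(0.3))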
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
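# Toy, single-process sketch of the LocalSGD idea exercised above: every "worker" takes
# `local_steps` independent updates, then all workers adopt the parameter average.
# Purely illustrative; the real synchronization lives inside accelerate's LocalSGD.
def _local_sgd_round(workers, grads, lr, local_steps):
    for _ in range(local_steps):
        for w, g in zip(workers, grads):
            w -= lr * g  # independent local updates, no communication
    avg = torch.stack(workers).mean(dim=0)
    for w in workers:
        w.copy_(avg)  # the "all-reduce": everyone continues from the averaged parameters
    return workers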
from datetime import datetime

import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
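# Defensive variant (illustrative helper, not part of the original script): `soup.find`
# returns None when a page lacks an og:image meta tag, so a guarded lookup avoids the
# TypeError the direct subscript above would raise.
def find_og_image(html: bytes):
    tag = BeautifulSoup(html, "html.parser").find("meta", {"property": "og:image"})
    return tag["content"] if tag is not None else None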
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _UpperCAmelCase ( BaseImageProcessor ):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
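# Sketch of the shortest-edge resize rule used in `resize` above (assumed behavior of
# get_resize_output_image_size with default_to_square=False): scale the image so its
# smaller side hits the target while preserving aspect ratio. Hypothetical helper.
def _shortest_edge_size(h: int, w: int, shortest_edge: int) -> tuple:
    scale = shortest_edge / min(h, w)
    return (round(h * scale), round(w * scale))

assert _shortest_edge_size(480, 640, 224) == (224, 299)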
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_deberta''': ['''DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DebertaConfig''', '''DebertaOnnxConfig'''],
'''tokenization_deberta''': ['''DebertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
'''DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DebertaForMaskedLM''',
'''DebertaForQuestionAnswering''',
'''DebertaForSequenceClassification''',
'''DebertaForTokenClassification''',
'''DebertaModel''',
'''DebertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
'''TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDebertaForMaskedLM''',
'''TFDebertaForQuestionAnswering''',
'''TFDebertaForSequenceClassification''',
'''TFDebertaForTokenClassification''',
'''TFDebertaModel''',
'''TFDebertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
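# Minimal stand-in for the `_LazyModule` pattern above (illustrative, not the HF class):
# importing the package stays cheap, and heavy submodules load on first attribute access.
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure  # e.g. {"modeling_deberta": ["DebertaModel"]}

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(attr)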
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
_lowercase: List[Any] = logging.get_logger(__name__)
class BeitFeatureExtractor( BeitImageProcessor ):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use BeitImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
def solution(limit: int = 50_000_000) -> int:
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime_1 in primes:
        square = prime_1 * prime_1
        for prime_2 in primes:
            cube = prime_2 * prime_2 * prime_2
            # the smallest fourth power is 2**4 == 16, so larger sums can never fit
            if square + cube >= limit - 16:
                break
            for prime_3 in primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
if __name__ == "__main__":
print(f"""{solution() = }""")
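# Quick check against the problem statement: exactly four numbers below fifty are
# expressible as p1**2 + p2**3 + p3**4 (28, 33, 47 and 49), so solution(50) must be 4.
assert solution(50) == 4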
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler( SchedulerMixin , ConfigMixin ):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
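# Sanity sketch for the multi-step combination in `step` above: the 2-, 3- and 4-term
# rules are Adams-Bashforth coefficients, so each set must sum to 1 (a constant
# derivative is then reproduced exactly).
for _coeffs in ([3 / 2, -1 / 2], [23 / 12, -16 / 12, 5 / 12], [55 / 24, -59 / 24, 37 / 24, -9 / 24]):
    assert abs(sum(_coeffs) - 1.0) < 1e-12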
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images for the processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def __lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
__UpperCamelCase : str = self.get_tokenizer()
__UpperCamelCase : Union[str, Any] = self.get_rust_tokenizer()
__UpperCamelCase : Any = self.get_image_processor()
__UpperCamelCase : str = ChineseCLIPProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
__UpperCamelCase : Optional[Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCamelCase )
__UpperCamelCase : Union[str, Any] = ChineseCLIPProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
__UpperCamelCase : Tuple = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __UpperCamelCase )
self.assertIsInstance(processor_fast.tokenizer , __UpperCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __UpperCamelCase )
self.assertIsInstance(processor_fast.image_processor , __UpperCamelCase )
def __lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase : Any = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__UpperCamelCase : Optional[Any] = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
__UpperCamelCase : Tuple = self.get_image_processor(do_normalize=__UpperCamelCase )
__UpperCamelCase : List[Any] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=__UpperCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCamelCase )
def __lowerCamelCase ( self ) -> str:
'''simple docstring'''
__UpperCamelCase : List[str] = self.get_image_processor()
__UpperCamelCase : List[str] = self.get_tokenizer()
__UpperCamelCase : Tuple = ChineseCLIPProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__UpperCamelCase : Optional[Any] = self.prepare_image_inputs()
__UpperCamelCase : List[str] = image_processor(__UpperCamelCase , return_tensors="np" )
__UpperCamelCase : List[Any] = processor(images=__UpperCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = self.get_image_processor()
__UpperCamelCase : Union[str, Any] = self.get_tokenizer()
__UpperCamelCase : int = ChineseCLIPProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__UpperCamelCase : int = "Alexandra,T-shirt的价格是15便士。"
__UpperCamelCase : int = processor(text=__UpperCamelCase )
__UpperCamelCase : int = tokenizer(__UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase : List[str] = self.get_image_processor()
__UpperCamelCase : List[str] = self.get_tokenizer()
__UpperCamelCase : Optional[int] = ChineseCLIPProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__UpperCamelCase : str = "Alexandra,T-shirt的价格是15便士。"
__UpperCamelCase : List[Any] = self.prepare_image_inputs()
__UpperCamelCase : Union[str, Any] = processor(text=__UpperCamelCase , images=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__UpperCamelCase ):
processor()
def __lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
__UpperCamelCase : Tuple = self.get_image_processor()
__UpperCamelCase : Any = self.get_tokenizer()
__UpperCamelCase : Dict = ChineseCLIPProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__UpperCamelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCamelCase : str = processor.batch_decode(__UpperCamelCase )
__UpperCamelCase : Dict = tokenizer.batch_decode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def __lowerCamelCase ( self ) -> int:
'''simple docstring'''
__UpperCamelCase : Optional[int] = self.get_image_processor()
__UpperCamelCase : Tuple = self.get_tokenizer()
__UpperCamelCase : Dict = ChineseCLIPProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__UpperCamelCase : Tuple = "Alexandra,T-shirt的价格是15便士。"
__UpperCamelCase : Optional[int] = self.prepare_image_inputs()
__UpperCamelCase : Tuple = processor(text=__UpperCamelCase , images=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class a_ ( unittest.TestCase ):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def lowerCAmelCase__ ( self ):
a_ = self.get_tokenizer()
a_ = self.get_rust_tokenizer()
a_ = self.get_image_processor()
a_ = OwlViTProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
a_ = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase )
a_ = OwlViTProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
a_ = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a_ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
a_ = self.get_image_processor(do_normalize=UpperCAmelCase )
a_ = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCAmelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def lowerCAmelCase__ ( self ):
a_ = self.get_image_processor()
a_ = self.get_tokenizer()
a_ = OwlViTProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
a_ = self.prepare_image_inputs()
a_ = image_processor(UpperCAmelCase , return_tensors="""np""" )
a_ = processor(images=UpperCAmelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase__ ( self ):
a_ = self.get_image_processor()
a_ = self.get_tokenizer()
a_ = OwlViTProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
a_ = """lower newer"""
a_ = processor(text=UpperCAmelCase , return_tensors="""np""" )
a_ = tokenizer(UpperCAmelCase , return_tensors="""np""" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def lowerCAmelCase__ ( self ):
a_ = self.get_image_processor()
a_ = self.get_tokenizer()
a_ = OwlViTProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
a_ = """lower newer"""
a_ = self.prepare_image_inputs()
a_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def lowerCAmelCase__ ( self ):
a_ = """google/owlvit-base-patch32"""
a_ = OwlViTProcessor.from_pretrained(UpperCAmelCase )
a_ = ["""cat""", """nasa badge"""]
a_ = processor(text=UpperCAmelCase )
a_ = 16
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def lowerCAmelCase__ ( self ):
a_ = """google/owlvit-base-patch32"""
a_ = OwlViTProcessor.from_pretrained(UpperCAmelCase )
a_ = [["""cat""", """nasa badge"""], ["""person"""]]
a_ = processor(text=UpperCAmelCase )
a_ = 16
a_ = len(UpperCAmelCase )
a_ = max([len(UpperCAmelCase ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(UpperCAmelCase ):
processor()
def lowerCAmelCase__ ( self ):
a_ = """google/owlvit-base-patch32"""
a_ = OwlViTProcessor.from_pretrained(UpperCAmelCase )
a_ = ["""cat""", """nasa badge"""]
a_ = processor(text=UpperCAmelCase )
a_ = 16
a_ = inputs["""input_ids"""]
a_ = [
[4_94_06, 23_68, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_94_06, 68_41, 1_13_01, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, query_images=query_input)
        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
| 713
|
'''simple docstring'''
def solution(length: int = 50) -> int:
    # Count the ways a row of ``length`` units can be filled with red (2),
    # green (3) or blue (4) tiles, using at least one tile and exactly one
    # tile size per counted arrangement (Project Euler problem 116).
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"{solution() = }")
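# A quick sanity check: Project Euler 116 lists 12 valid arrangements for a
# row of length five (7 with red tiles, 3 with green, 2 with blue).
if __name__ == "__main__":
    assert solution(5) == 12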
| 511
| 0
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser


def tpu_command_launcher(args):
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
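# A hedged usage sketch: once a config file exists (created via `accelerate
# config`), the same code path can be exercised from the shell, for example:
#     accelerate tpu-config --command "pip install tensorflow" --debug
# (`--debug` prints the assembled gcloud invocation instead of running it).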
| 374
|
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
| 374
| 1
|
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
lowerCAmelCase_ : Tuple = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
lowerCAmelCase_ : List[Any] = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
lowerCAmelCase_ : List[str] = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
| 461
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"gpt2": 1_024,
"gpt2-medium": 1_024,
"gpt2-large": 1_024,
"gpt2-xl": 1_024,
"distilgpt2": 1_024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        self.add_bos_token = kwargs.pop("add_bos_token", False)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
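# A hedged usage sketch: pretokenized input requires add_prefix_space=True,
# which the overrides above enforce.
#     tok = GPT2TokenizerFast.from_pretrained("gpt2", add_prefix_space=True)
#     ids = tok(["Hello", "world"], is_split_into_words=True)["input_ids"]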
| 461
| 1
|
def rank_of_matrix(matrix: list[list[float]]) -> int:
    """Find the rank of a matrix via Gaussian elimination (modifies `matrix` in place)."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
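# A small worked example: the second row of this matrix is twice the first, so
# elimination zeroes it out and the rank is 1.
if __name__ == "__main__":
    assert rank_of_matrix([[1.0, 2.0], [2.0, 4.0]]) == 1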
| 511
|
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 511
| 1
|
"""simple docstring"""
demo_graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find the shortest path between `start` and `goal` nodes with breadth-first search."""
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find the shortest path distance (number of edges) from `start` to `target`."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
| 663
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 663
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_chinese_clip""": [
"""CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ChineseCLIPConfig""",
"""ChineseCLIPOnnxConfig""",
"""ChineseCLIPTextConfig""",
"""ChineseCLIPVisionConfig""",
],
"""processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
"""CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ChineseCLIPModel""",
"""ChineseCLIPPreTrainedModel""",
"""ChineseCLIPTextModel""",
"""ChineseCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 548
|
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 155
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
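# A minimal sketch of what _LazyModule provides: the heavy framework code named
# in _import_structure is only imported on first attribute access, e.g.
#     import transformers.models.vit as vit
#     config = vit.ViTConfig()   # only now is configuration_vit actually imported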
| 485
|
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate preceded by a linear warmup over `num_warmup_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise constant schedule described by a rule string such as "1:10,0.1:20,0.01:30,0.005"."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay to zero."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay with several hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup followed by a polynomial decay from the initial lr to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
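# A hedged usage sketch (assumes an existing torch model named `model`):
#     from torch.optim import AdamW
#     optimizer = AdamW(model.parameters(), lr=1e-4)
#     lr_scheduler = get_scheduler(
#         "cosine", optimizer, num_warmup_steps=500, num_training_steps=10_000
#     )
#     for step in range(10_000):
#         ...  # forward, backward
#         optimizer.step()
#         lr_scheduler.step()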
| 485
| 1
|
"""simple docstring"""
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """Minimum total cost of 1-, 7- and 30-day passes covering all travel days (LeetCode 983)."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
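# LeetCode 983's first example: for travel days [1, 4, 6, 7, 8, 20] and pass
# costs [2, 7, 15], a 1-day pass on day 1, a 7-day pass on day 4 and a 1-day
# pass on day 20 give the optimum of 2 + 7 + 2 = 11.
if __name__ == "__main__":
    assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11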
| 434
|
"""simple docstring"""
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """Calculate the Adler-32 checksum of the given string."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
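# Reference value from the Adler-32 article: the checksum of the ASCII string
# "Wikipedia" is 0x11E60398, i.e. 300286872 in decimal.
if __name__ == "__main__":
    assert adler32("Wikipedia") == 300286872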
| 434
| 1
|
'''simple docstring'''
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """
    Split `code` into its indented blocks, starting at `start_prompt` and ending before `end_prompt` if provided.
    """
    # Let's split the code into lines and move to start_index.
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wraps a `key` function (mapping an object to a string) to lowercase and remove underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort. `key` optionally maps an object to a str."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Return the same `import_statement` but with the objects inside it sorted."""

    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """Sort the `_import_structure` imports in `file`; `check_only` determines if we only check or overwrite."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
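# A hedged usage sketch, run from the repository root (this mirrors what
# `make style` / `make quality` do for __init__.py files):
#     python utils/custom_init_isort.py               # rewrite inits in place
#     python utils/custom_init_isort.py --check_only  # only fail if dirty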
| 399
|
'''simple docstring'''
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Each slot holds a deque so colliding values chain onto the same key.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
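# A hedged usage sketch (assumes the parent HashTable exposes a constructor of
# the form HashTable(size_table, charge_factor=None, lim_charge=None) and an
# insert_data method, as in TheAlgorithms' hash_table module):
#     ht = HashTableWithLinkedList(3, charge_factor=2)
#     for value in (10, 20, 30):
#         ht.insert_data(value)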
| 399
| 1
|