from argparse import ArgumentParser, Namespace

from ..utils import logging
from . import BaseTransformersCLICommand


def convert_command_factory(args: Namespace):
    """Factory function used to convert a model TF 1.0 checkpoint into a PyTorch checkpoint."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""


class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command to argparse so it's available for the transformers-cli."""
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            # A path containing "ckpt" is treated as a TF checkpoint, anything else as a dataset file.
            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]"
            )
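# --- Usage sketch (illustrative addition, not part of the original file) ---
# A minimal sketch of how this subcommand is registered and invoked, assuming the
# surrounding `transformers-cli` entry point; the argument values are placeholders.
#
#     from argparse import ArgumentParser
#
#     parser = ArgumentParser("transformers-cli")
#     subparsers = parser.add_subparsers()
#     ConvertCommand.register_subcommand(subparsers)
#     args = parser.parse_args(
#         ["convert", "--model_type", "bert",
#          "--tf_checkpoint", "bert_model.ckpt",
#          "--pytorch_dump_output", "pytorch_model.bin"]
#     )
#     args.func(args).run()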
from unittest import TestCase

from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def get_dataset() -> Dataset:
    dataset_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(dataset_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        """Insert a list of words into the Trie."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a single word into the Trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Return True if the exact word is stored in the Trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Delete a word from the Trie, pruning nodes that become empty."""

        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                # Unmark the leaf; the node can be removed if it has no children
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
from __future__ import annotations

import unittest

from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFRoFormerForCausalLM,
        TFRoFormerForMaskedLM,
        TFRoFormerForMultipleChoice,
        TFRoFormerForQuestionAnswering,
        TFRoFormerForSequenceClassification,
        TFRoFormerForTokenClassification,
        TFRoFormerModel,
    )
    from transformers.models.roformer.modeling_tf_roformer import (
        TFRoFormerSelfAttention,
        TFRoFormerSinusoidalPositionalEmbedding,
    )


class TFRoFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )

    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)


@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)


@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)

        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )

        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]

        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)


@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2, 12, 16, 64 are the batch, head, sequence and head-dim sizes of the query/key tensors
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        desired_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        desired_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
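# --- Usage sketch (illustrative addition, not part of the original file) ---
# A minimal sketch of calling the processor; the checkpoint name and image path
# are assumptions for illustration.
#
#     from PIL import Image
#     from transformers import CLIPProcessor
#
#     processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     image = Image.open("cat.png")
#     inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#     # `inputs` holds input_ids/attention_mask from the tokenizer plus
#     # pixel_values from the image processor, merged as in __call__ above.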
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}


if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
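# --- Usage note (illustrative addition, not part of the original file) ---
# The _LazyModule indirection defers the heavy import until first attribute access:
#
#     from transformers.models.byt5 import ByT5Tokenizer  # module itself stays lazy
#     tok = ByT5Tokenizer()  # first real use triggers loading tokenization_byt5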
import inspect
import unittest

from transformers import MobileViTV2Config
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model
    from transformers.models.mobilevitv2.modeling_mobilevitv2 import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )


if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))


class MobileViTV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTV2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout_prob,
            attn_dropout=self.attn_dropout_prob,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileViTV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTV2Model, MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )

    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTV2Model,
            "image-classification": MobileViTV2ForImageClassification,
            "image-segmentation": MobileViTV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTV2ModelTester(self)
        self.config_tester = MobileViTV2ConfigTester(self, config_class=MobileViTV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None):
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ):
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha**0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ):
        # Make sure alphas_cumprod and timesteps have the same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
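# --- Usage sketch (illustrative addition, not part of the original file) ---
# A minimal denoising loop, assuming `unet(x, t)` is a placeholder model that
# returns an epsilon prediction shaped like `x`. The real unCLIP pipelines also
# pass `prev_timestep` explicitly for spaced timesteps; it is omitted here for brevity.
#
#     scheduler = UnCLIPScheduler()
#     scheduler.set_timesteps(25, device="cpu")
#     x = torch.randn(1, 3, 64, 64)
#     for t in scheduler.timesteps:
#         eps = unet(x, t)  # model's noise prediction (placeholder)
#         x = scheduler.step(eps, int(t), x).prev_sample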
import gc
import threading
import time

import psutil
import torch


class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1

        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem (deltas converted from bytes to MiB)
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
from ..utils import DummyObject, requires_backends


# Dummy placeholders that raise an informative error when the "speech" backend
# is missing. The concrete class names below are reconstructed best-guesses.
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
import warnings

from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor


logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 78
|
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[Any] = {
"google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class __lowercase ( A ):
__magic_name__ : List[Any] = '''efficientnet'''
def __init__( self , a__ = 3 , a__ = 6_0_0 , a__ = 2.0 , a__ = 3.1 , a__ = 8 , a__ = [3, 3, 5, 3, 5, 5, 3] , a__ = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , a__ = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , a__ = [] , a__ = [1, 2, 2, 2, 1, 2, 1] , a__ = [1, 2, 2, 3, 3, 4, 1] , a__ = [1, 6, 6, 6, 6, 6, 6] , a__ = 0.25 , a__ = "swish" , a__ = 2_5_6_0 , a__ = "mean" , a__ = 0.02 , a__ = 0.0_01 , a__ = 0.99 , a__ = 0.5 , a__ = 0.2 , **a__ , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**a__ )
A_ = num_channels
A_ = image_size
A_ = width_coefficient
A_ = depth_coefficient
A_ = depth_divisor
A_ = kernel_sizes
A_ = in_channels
A_ = out_channels
A_ = depthwise_padding
A_ = strides
A_ = num_block_repeats
A_ = expand_ratios
A_ = squeeze_expansion_ratio
A_ = hidden_act
A_ = hidden_dim
A_ = pooling_type
A_ = initializer_range
A_ = batch_norm_eps
A_ = batch_norm_momentum
A_ = dropout_rate
A_ = drop_connect_rate
A_ = sum(a__ ) * 4
class __lowercase ( A ):
__magic_name__ : Any = version.parse('''1.11''' )
@property
def lowerCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowerCAmelCase_ ( self ) -> float:
'''simple docstring'''
return 1E-5
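    # These two properties mirror OnnxConfig's `inputs` and (presumably, since the
    # names are mangled) `atol_for_validation`: the exported ONNX graph takes NCHW
    # pixel_values and is validated against the PyTorch outputs with an absolute
    # tolerance of 1e-5.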
| 141
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class a ( __UpperCAmelCase ):
lowercase_ : Union[str, Any] = 'beit'
def __init__( self : Optional[Any] , snake_case__ : List[Any]=8_192 , snake_case__ : str=768 , snake_case__ : Union[str, Any]=12 , snake_case__ : str=12 , snake_case__ : Optional[int]=3_072 , snake_case__ : Optional[int]="gelu" , snake_case__ : str=0.0 , snake_case__ : Optional[Any]=0.0 , snake_case__ : int=0.0_2 , snake_case__ : Optional[int]=1E-12 , snake_case__ : Dict=224 , snake_case__ : str=16 , snake_case__ : Optional[Any]=3 , snake_case__ : int=False , snake_case__ : List[str]=False , snake_case__ : Tuple=False , snake_case__ : Dict=False , snake_case__ : List[str]=0.1 , snake_case__ : Optional[Any]=0.1 , snake_case__ : Any=True , snake_case__ : List[Any]=[3, 5, 7, 11] , snake_case__ : Optional[int]=[1, 2, 3, 6] , snake_case__ : str=True , snake_case__ : Any=0.4 , snake_case__ : Dict=256 , snake_case__ : str=1 , snake_case__ : Tuple=False , snake_case__ : Union[str, Any]=255 , **snake_case__ : List[str] , ):
"""simple docstring"""
super().__init__(**snake_case__ )
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = use_mask_token
__lowerCAmelCase = use_absolute_position_embeddings
__lowerCAmelCase = use_relative_position_bias
__lowerCAmelCase = use_shared_relative_position_bias
__lowerCAmelCase = layer_scale_init_value
__lowerCAmelCase = drop_path_rate
__lowerCAmelCase = use_mean_pooling
# decode head attributes (semantic segmentation)
__lowerCAmelCase = out_indices
__lowerCAmelCase = pool_scales
# auxiliary head attributes (semantic segmentation)
__lowerCAmelCase = use_auxiliary_head
__lowerCAmelCase = auxiliary_loss_weight
__lowerCAmelCase = auxiliary_channels
__lowerCAmelCase = auxiliary_num_convs
__lowerCAmelCase = auxiliary_concat_input
__lowerCAmelCase = semantic_loss_ignore_index
class a ( __UpperCAmelCase ):
lowercase_ : List[str] = version.parse('1.11' )
@property
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
return 1E-4
| 376
|
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Read the given file as raw bytes and return its contents as a bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompress the given bit string using the Lempel-Ziv-Welch algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        # When the lexicon size reaches a power of two, widen every key by one bit
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Pack the given bit string into bytes and write them to the given file."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Strip the size prefix that the matching compressor prepends to the data."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """Read and decompress the source file, then write the result to the destination.

    The driver keeps the name `compress` so the __main__ call below still works,
    even though this module performs decompression.
    """
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
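# A minimal usage sketch (file names are placeholders, assuming this module is saved
# as lempel_ziv_decompress.py):
#   python lempel_ziv_decompress.py compressed.lzw restored.bin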
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 376
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCamelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = KandinskyVaaControlnetImgaImgPipeline
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["image_embeds", "negative_image_embeds", "image", "hint"]
SCREAMING_SNAKE_CASE__ : int = ["image_embeds", "negative_image_embeds", "image", "hint"]
SCREAMING_SNAKE_CASE__ : Dict = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
SCREAMING_SNAKE_CASE__ : List[Any] = False
@property
def A_ ( self ):
'''simple docstring'''
return 3_2
@property
def A_ ( self ):
'''simple docstring'''
return 3_2
@property
def A_ ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def A_ ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def A_ ( self ):
'''simple docstring'''
return 1_0_0
@property
def A_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase : Dict = {
"in_channels": 8,
            # Out channels is double the in channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
UpperCAmelCase : Optional[Any] = UNetaDConditionModel(**snake_case )
return model
@property
def A_ ( self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def A_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase : Optional[int] = VQModel(**self.dummy_movq_kwargs )
return model
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = self.dummy_unet
UpperCAmelCase : Tuple = self.dummy_movq
UpperCAmelCase : str = {
"num_train_timesteps": 1_0_0_0,
"beta_schedule": "linear",
"beta_start": 0.0_0085,
"beta_end": 0.012,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
UpperCAmelCase : Tuple = DDIMScheduler(**snake_case )
UpperCAmelCase : str = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def A_ ( self , snake_case , snake_case=0 ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case ) ).to(snake_case )
UpperCAmelCase : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
snake_case )
# create init_image
UpperCAmelCase : str = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(snake_case ) ).to(snake_case )
UpperCAmelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase : Tuple = Image.fromarray(np.uinta(snake_case ) ).convert("RGB" ).resize((2_5_6, 2_5_6) )
# create hint
UpperCAmelCase : Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(snake_case ) ).to(snake_case )
if str(snake_case ).startswith("mps" ):
UpperCAmelCase : Union[str, Any] = torch.manual_seed(snake_case )
else:
UpperCAmelCase : Tuple = torch.Generator(device=snake_case ).manual_seed(snake_case )
UpperCAmelCase : Optional[int] = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 6_4,
"width": 6_4,
"num_inference_steps": 1_0,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = "cpu"
UpperCAmelCase : List[str] = self.get_dummy_components()
UpperCAmelCase : Any = self.pipeline_class(**snake_case )
UpperCAmelCase : List[str] = pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
UpperCAmelCase : Tuple = pipe(**self.get_dummy_inputs(snake_case ) )
UpperCAmelCase : Any = output.images
UpperCAmelCase : List[Any] = pipe(
**self.get_dummy_inputs(snake_case ) , return_dict=snake_case , )[0]
UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
UpperCAmelCase : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCAmelCase : Dict = np.array(
[0.5498_5034, 0.5550_9365, 0.5256_1504, 0.557_0494, 0.559_3818, 0.526_3979, 0.5028_5643, 0.506_9846, 0.5119_6736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy" )
UpperCAmelCase : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
UpperCAmelCase : List[Any] = init_image.resize((5_1_2, 5_1_2) )
UpperCAmelCase : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png" )
UpperCAmelCase : Dict = torch.from_numpy(np.array(snake_case ) ).float() / 255.0
UpperCAmelCase : Tuple = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
UpperCAmelCase : Any = "A robot, 4k photo"
UpperCAmelCase : List[Any] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(snake_case )
UpperCAmelCase : Dict = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa )
UpperCAmelCase : Any = pipeline.to(snake_case )
pipeline.set_progress_bar_config(disable=snake_case )
UpperCAmelCase : Union[str, Any] = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = pipe_prior(
snake_case , image=snake_case , strength=0.85 , generator=snake_case , negative_prompt="" , ).to_tuple()
UpperCAmelCase : Optional[int] = pipeline(
image=snake_case , image_embeds=snake_case , negative_image_embeds=snake_case , hint=snake_case , generator=snake_case , num_inference_steps=1_0_0 , height=5_1_2 , width=5_1_2 , strength=0.5 , output_type="np" , )
UpperCAmelCase : int = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(snake_case , snake_case )
| 679
|
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
a : str = logging.getLogger(__name__)
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
def A_ ( self , snake_case , snake_case , snake_case=None , snake_case=None ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.layer[current_layer](snake_case , snake_case , head_mask[current_layer] )
UpperCAmelCase : Optional[int] = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , lowercase__ , )
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self , snake_case ):
'''simple docstring'''
super().__init__(snake_case )
UpperCAmelCase : Dict = BertEncoderWithPabee(snake_case )
self.init_weights()
UpperCAmelCase : int = 0
UpperCAmelCase : Dict = 0
UpperCAmelCase : Optional[int] = 0
UpperCAmelCase : List[Any] = 0
def A_ ( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : List[Any] = threshold
def A_ ( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : str = patience
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = 0
UpperCAmelCase : List[Any] = 0
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = self.inference_layers_num / self.inference_instances_num
UpperCAmelCase : List[Any] = (
f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
)
print(snake_case )
@add_start_docstrings_to_model_forward(snake_case )
def A_ ( self , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=False , ):
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
UpperCAmelCase : Dict = input_ids.size()
elif inputs_embeds is not None:
UpperCAmelCase : Any = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
UpperCAmelCase : Optional[int] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
UpperCAmelCase : Tuple = torch.ones(snake_case , device=snake_case )
if token_type_ids is None:
UpperCAmelCase : List[Any] = torch.zeros(snake_case , dtype=torch.long , device=snake_case )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
UpperCAmelCase : torch.Tensor = self.get_extended_attention_mask(snake_case , snake_case , snake_case )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = encoder_hidden_states.size()
UpperCAmelCase : List[str] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
UpperCAmelCase : int = torch.ones(snake_case , device=snake_case )
UpperCAmelCase : str = self.invert_attention_mask(snake_case )
else:
UpperCAmelCase : int = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
UpperCAmelCase : Dict = self.get_head_mask(snake_case , self.config.num_hidden_layers )
UpperCAmelCase : Tuple = self.embeddings(
input_ids=snake_case , position_ids=snake_case , token_type_ids=snake_case , inputs_embeds=snake_case )
UpperCAmelCase : int = embedding_output
if self.training:
UpperCAmelCase : int = []
for i in range(self.config.num_hidden_layers ):
UpperCAmelCase : List[Any] = self.encoder.adaptive_forward(
snake_case , current_layer=snake_case , attention_mask=snake_case , head_mask=snake_case )
UpperCAmelCase : Dict = self.pooler(snake_case )
UpperCAmelCase : List[Any] = output_layers[i](output_dropout(snake_case ) )
res.append(snake_case )
elif self.patience == 0: # Use all layers for inference
UpperCAmelCase : Union[str, Any] = self.encoder(
snake_case , attention_mask=snake_case , head_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , )
UpperCAmelCase : Optional[int] = self.pooler(encoder_outputs[0] )
UpperCAmelCase : List[str] = [output_layers[self.config.num_hidden_layers - 1](snake_case )]
else:
UpperCAmelCase : int = 0
UpperCAmelCase : Optional[Any] = None
UpperCAmelCase : Optional[Any] = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
UpperCAmelCase : Tuple = self.encoder.adaptive_forward(
snake_case , current_layer=snake_case , attention_mask=snake_case , head_mask=snake_case )
UpperCAmelCase : Any = self.pooler(snake_case )
UpperCAmelCase : int = output_layers[i](snake_case )
if regression:
UpperCAmelCase : Optional[Any] = logits.detach()
if patient_result is not None:
UpperCAmelCase : Union[str, Any] = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
UpperCAmelCase : Optional[Any] = 0
else:
UpperCAmelCase : Any = logits.detach().argmax(dim=1 )
if patient_result is not None:
UpperCAmelCase : Tuple = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(snake_case ) ):
patient_counter += 1
else:
UpperCAmelCase : str = 0
UpperCAmelCase : int = logits
if patient_counter == self.patience:
break
UpperCAmelCase : int = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
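# How the early exit above works: a classifier head is attached to every encoder
# layer; at inference, once `patience` consecutive layers agree on the argmax
# prediction (or, for regression, stay within `regression_threshold` of each other),
# the loop breaks and that layer's logits are returned without running the remaining
# layers.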
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , lowercase__ , )
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self , snake_case ):
'''simple docstring'''
super().__init__(snake_case )
UpperCAmelCase : Union[str, Any] = config.num_labels
UpperCAmelCase : Optional[Any] = BertModelWithPabee(snake_case )
UpperCAmelCase : Optional[int] = nn.Dropout(config.hidden_dropout_prob )
UpperCAmelCase : Any = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(snake_case )
def A_ ( self , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , ):
'''simple docstring'''
UpperCAmelCase : int = self.bert(
input_ids=snake_case , attention_mask=snake_case , token_type_ids=snake_case , position_ids=snake_case , head_mask=snake_case , inputs_embeds=snake_case , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
UpperCAmelCase : Tuple = (logits[-1],)
if labels is not None:
UpperCAmelCase : Optional[int] = None
UpperCAmelCase : List[Any] = 0
for ix, logits_item in enumerate(snake_case ):
if self.num_labels == 1:
# We are doing regression
UpperCAmelCase : Dict = MSELoss()
UpperCAmelCase : Union[str, Any] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
UpperCAmelCase : Optional[int] = CrossEntropyLoss()
UpperCAmelCase : Tuple = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
UpperCAmelCase : int = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
UpperCAmelCase : Tuple = (total_loss / total_weights,) + outputs
return outputs
| 679
| 1
|
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowerCAmelCase__ : Dict = logging.get_logger(__name__)
class __snake_case ( _lowerCamelCase ):
def __init__( self , *__UpperCamelCase , **__UpperCamelCase ) -> None:
'''simple docstring'''
warnings.warn(
'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PerceiverImageProcessor instead.' , FutureWarning , )
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
| 699
|
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
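# Quick sanity check using the factors above: 2 litres in gallons is
# 2 * 0.001 (litre -> cubic metre) * 264.172 (cubic metre -> gallon) ~= 0.5283,
# i.e. volume_conversion(2, "litre", "gallon").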
if __name__ == "__main__":
import doctest
doctest.testmod()
| 699
| 1
|
'''simple docstring'''
import pytest
lowerCAmelCase : Optional[Any] = """__dummy_dataset1__"""
lowerCAmelCase : Optional[Any] = """
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def dataset_loading_script_name() -> str:
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code() -> str:
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path) -> str:
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_path)
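# Note: tmp_path is pytest's built-in per-test temporary directory fixture, so each
# test requesting dataset_loading_script_dir gets a fresh datasets/<name>/<name>.py
# loading script.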
| 372
|
import copy
import random
from transformers import CLIPTokenizer
class A__ ( __SCREAMING_SNAKE_CASE):
def __init__( self , *__magic_name__ , **__magic_name__ ):
super().__init__(*__magic_name__ , **__magic_name__ )
lowerCamelCase : Dict = {}
def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , **__magic_name__ ):
lowerCamelCase : Any = super().add_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ )
if num_added_tokens == 0:
raise ValueError(
F'''The tokenizer already contains the token {placeholder_token}. Please pass a different'''
""" `placeholder_token` that is not already in the tokenizer.""" )
def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , __magic_name__=1 , **__magic_name__ ):
lowerCamelCase : List[Any] = []
if num_vec_per_token == 1:
self.try_adding_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ )
output.append(__magic_name__ )
else:
lowerCamelCase : Dict = []
for i in range(__magic_name__ ):
lowerCamelCase : Optional[Any] = placeholder_token + F'''_{i}'''
self.try_adding_tokens(__magic_name__ , *__magic_name__ , **__magic_name__ )
output.append(__magic_name__ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F'''The tokenizer already has placeholder token {token} that can get confused with'''
                    F''' {placeholder_token}; keep placeholder tokens independent''' )
lowerCamelCase : Any = output
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__=False , __magic_name__=1.0 ):
if isinstance(__magic_name__ , __magic_name__ ):
lowerCamelCase : List[str] = []
for i in range(len(__magic_name__ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=__magic_name__ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
lowerCamelCase : List[str] = self.token_map[placeholder_token]
lowerCamelCase : Optional[Any] = tokens[: 1 + int(len(__magic_name__ ) * prop_tokens_to_load )]
if vector_shuffle:
lowerCamelCase : Union[str, Any] = copy.copy(__magic_name__ )
random.shuffle(__magic_name__ )
lowerCamelCase : str = text.replace(__magic_name__ , """ """.join(__magic_name__ ) )
return text
def __call__( self , __magic_name__ , *__magic_name__ , __magic_name__=False , __magic_name__=1.0 , **__magic_name__ ):
return super().__call__(
self.replace_placeholder_tokens_in_text(
__magic_name__ , vector_shuffle=__magic_name__ , prop_tokens_to_load=__magic_name__ ) , *__magic_name__ , **__magic_name__ , )
def UpperCamelCase__ ( self , __magic_name__ , *__magic_name__ , __magic_name__=False , __magic_name__=1.0 , **__magic_name__ ):
return super().encode(
self.replace_placeholder_tokens_in_text(
__magic_name__ , vector_shuffle=__magic_name__ , prop_tokens_to_load=__magic_name__ ) , *__magic_name__ , **__magic_name__ , )
| 681
| 0
|
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCamelCase__ ( UpperCAmelCase ,unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
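        # gpu_mem_limit caps ONNX Runtime's CUDA memory arena at 15 GB, and
        # kSameAsRequested makes the arena grow only by what is requested instead of
        # doubling, which helps these nightly tests avoid exhausting GPU memory.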
@property
def SCREAMING_SNAKE_CASE__ ( self : str ):
_lowerCAmelCase = ort.SessionOptions()
_lowerCAmelCase = False
return options
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
_lowerCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
_lowerCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
_lowerCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting' , revision='onnx' , safety_checker=lowercase__ , feature_extractor=lowercase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase__ )
_lowerCAmelCase = 'A red cat sitting on a park bench'
_lowerCAmelCase = np.random.RandomState(0 )
_lowerCAmelCase = pipe(
prompt=lowercase__ , image=lowercase__ , mask_image=lowercase__ , guidance_scale=7.5 , num_inference_steps=10 , generator=lowercase__ , output_type='np' , )
_lowerCAmelCase = output.images
_lowerCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
_lowerCAmelCase = np.array([0.2_5_1_4, 0.3_0_0_7, 0.3_5_1_7, 0.1_7_9_0, 0.2_3_8_2, 0.3_1_6_7, 0.1_9_4_4, 0.2_2_7_3, 0.2_4_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
_lowerCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
_lowerCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
_lowerCAmelCase = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-inpainting' , subfolder='scheduler' , revision='onnx' )
_lowerCAmelCase = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting' , revision='onnx' , scheduler=lowercase__ , safety_checker=lowercase__ , feature_extractor=lowercase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowercase__ )
_lowerCAmelCase = 'A red cat sitting on a park bench'
_lowerCAmelCase = np.random.RandomState(0 )
_lowerCAmelCase = pipe(
prompt=lowercase__ , image=lowercase__ , mask_image=lowercase__ , guidance_scale=7.5 , num_inference_steps=20 , generator=lowercase__ , output_type='np' , )
_lowerCAmelCase = output.images
_lowerCAmelCase = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
_lowerCAmelCase = np.array([0.0_0_8_6, 0.0_0_7_7, 0.0_0_8_3, 0.0_0_9_3, 0.0_1_0_7, 0.0_1_3_9, 0.0_0_9_4, 0.0_0_9_7, 0.0_1_2_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 225
|
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase__ ( UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ =OpenAIGPTTokenizer
UpperCamelCase__ =OpenAIGPTTokenizerFast
UpperCamelCase__ =True
UpperCamelCase__ =False
def SCREAMING_SNAKE_CASE__ ( self : Any ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
_lowerCAmelCase = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
_lowerCAmelCase = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
_lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(lowercase__ ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(lowercase__ ) )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , lowercase__ : str ):
return "lower newer", "lower newer"
def SCREAMING_SNAKE_CASE__ ( self : Any ):
_lowerCAmelCase = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
_lowerCAmelCase = 'lower'
_lowerCAmelCase = ['low', 'er</w>']
_lowerCAmelCase = tokenizer.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
_lowerCAmelCase = tokens + ['<unk>']
_lowerCAmelCase = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , lowercase__ : Union[str, Any]=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
# Simple input
_lowerCAmelCase = 'This is a simple input'
_lowerCAmelCase = ['This is a simple input 1', 'This is a simple input 2']
_lowerCAmelCase = ('This is a simple input', 'This is a pair')
_lowerCAmelCase = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(lowercase__ , tokenizer_r.encode , lowercase__ , max_length=lowercase__ , padding='max_length' )
# Simple input
self.assertRaises(lowercase__ , tokenizer_r.encode_plus , lowercase__ , max_length=lowercase__ , padding='max_length' )
# Simple input
self.assertRaises(
lowercase__ , tokenizer_r.batch_encode_plus , lowercase__ , max_length=lowercase__ , padding='max_length' , )
# Pair input
self.assertRaises(lowercase__ , tokenizer_r.encode , lowercase__ , max_length=lowercase__ , padding='max_length' )
# Pair input
self.assertRaises(lowercase__ , tokenizer_r.encode_plus , lowercase__ , max_length=lowercase__ , padding='max_length' )
# Pair input
self.assertRaises(
lowercase__ , tokenizer_r.batch_encode_plus , lowercase__ , max_length=lowercase__ , padding='max_length' , )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
pass
@require_ftfy
@require_spacy
@require_tokenizers
class lowerCamelCase__ ( UpperCAmelCase ):
pass
| 225
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class A__:
lowerCAmelCase = XGLMConfig
lowerCAmelCase = {}
lowerCAmelCase = "gelu"
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str]=14 , __SCREAMING_SNAKE_CASE : Union[str, Any]=7 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Tuple=99 , __SCREAMING_SNAKE_CASE : Optional[Any]=32 , __SCREAMING_SNAKE_CASE : List[Any]=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=4 , __SCREAMING_SNAKE_CASE : List[str]=37 , __SCREAMING_SNAKE_CASE : Optional[Any]="gelu" , __SCREAMING_SNAKE_CASE : str=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=5_12 , __SCREAMING_SNAKE_CASE : List[Any]=0.02 , ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_input_mask
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = d_model
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = ffn_dim
__SCREAMING_SNAKE_CASE = activation_function
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = 1
def _a ( self : List[str] ) -> Tuple:
"""simple docstring"""
return XGLMConfig.from_pretrained('''facebook/xglm-564M''' )
def _a ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
__SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE = self.get_config()
__SCREAMING_SNAKE_CASE = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=snake_case__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=snake_case__ , )
def _a ( self : str ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
(
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) ,
) = config_and_inputs
__SCREAMING_SNAKE_CASE = {
'''input_ids''': input_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_tf
class A__( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
lowerCAmelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
lowerCAmelCase = (TFXGLMForCausalLM,) if is_tf_available() else ()
lowerCAmelCase = (
{"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def _a ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = TFXGLMModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , n_embd=37 )
def _a ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
def _a ( self : int ) -> Any:
"""simple docstring"""
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = TFXGLMModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' )
def _a ( self : str ) -> Optional[int]:
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class A__( unittest.TestCase ):
@slow
def _a ( self : int , __SCREAMING_SNAKE_CASE : Optional[int]=True ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
__SCREAMING_SNAKE_CASE = tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
__SCREAMING_SNAKE_CASE = [2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81]
# fmt: on
__SCREAMING_SNAKE_CASE = model.generate(snake_case__ , do_sample=snake_case__ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , snake_case__ )
@slow
def _a ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
__SCREAMING_SNAKE_CASE = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
tf.random.set_seed(0 )
__SCREAMING_SNAKE_CASE = tokenizer('''Today is a nice day and''' , return_tensors='''tf''' )
__SCREAMING_SNAKE_CASE = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(''':/CPU:0''' ):
__SCREAMING_SNAKE_CASE = model.generate(snake_case__ , do_sample=snake_case__ , seed=[7, 0] )
__SCREAMING_SNAKE_CASE = tokenizer.decode(output_ids[0] , skip_special_tokens=snake_case__ )
__SCREAMING_SNAKE_CASE = (
'''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
)
self.assertEqual(snake_case__ , snake_case__ )
@slow
def _a ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
__SCREAMING_SNAKE_CASE = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
__SCREAMING_SNAKE_CASE = '''left'''
# use different length sentences to test batching
__SCREAMING_SNAKE_CASE = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When''',
'''Hello, my dog is a little''',
]
__SCREAMING_SNAKE_CASE = tokenizer(snake_case__ , return_tensors='''tf''' , padding=snake_case__ )
__SCREAMING_SNAKE_CASE = inputs['''input_ids''']
__SCREAMING_SNAKE_CASE = model.generate(input_ids=snake_case__ , attention_mask=inputs['''attention_mask'''] , max_new_tokens=12 )
__SCREAMING_SNAKE_CASE = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
__SCREAMING_SNAKE_CASE = model.generate(input_ids=snake_case__ , max_new_tokens=12 )
__SCREAMING_SNAKE_CASE = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
__SCREAMING_SNAKE_CASE = model.generate(input_ids=snake_case__ , max_new_tokens=12 )
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )
__SCREAMING_SNAKE_CASE = tokenizer.decode(output_non_padded[0] , skip_special_tokens=snake_case__ )
__SCREAMING_SNAKE_CASE = tokenizer.decode(output_padded[0] , skip_special_tokens=snake_case__ )
__SCREAMING_SNAKE_CASE = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
'''a single''',
'''Hello, my dog is a little bit of a shy one, but he is very friendly''',
]
self.assertListEqual(snake_case__ , snake_case__ )
self.assertListEqual(snake_case__ , [non_padded_sentence, padded_sentence] )
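        # Left padding matters for decoder-only models like XGLM: with right padding
        # the model would condition on trailing pad tokens, so batched generation
        # would diverge from the unpadded per-sentence generations checked above.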
| 482
|
"""simple docstring"""
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return " ".join(
"""""".join(word[::-1] ) if len(lowerCamelCase__ ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
| 644
| 0
|
"""Shortest-remaining-time-first (preemptive shortest job first) CPU scheduling."""
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    """Calculate each process's waiting time under shortest-remaining-time-first scheduling."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999_999_999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999_999_999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    """Turnaround time is the sum of burst time and waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(
    waiting_time: list[int], turn_around_time: list[int], no_of_processes: int
) -> None:
    """Print the average waiting time and the average turnaround time."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)


if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
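# Worked example: with arrival times [0, 1, 2] and burst times [3, 1, 2], the
# schedule is P1 (t=0-1), P2 (t=1-2), P1 (t=2-4), P3 (t=4-6), giving waiting times
# [1, 0, 2] and turnaround times [4, 1, 4].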
| 707
|
"""The Rabin-Karp string-matching algorithm with a rolling polynomial hash."""
# Number of characters in the alphabet, used as the hash base
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if the pattern occurs in the text, using Rabin-Karp hashing."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")
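# Rolling-hash intuition: sliding the window one character updates text_hash in O(1)
# as ((old_hash - ord(dropped) * modulus_power) * alphabet_size + ord(added)) % modulus,
# which is exactly the update performed in the second loop of rabin_karp above.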
if __name__ == "__main__":
test_rabin_karp()
| 411
| 0
|
a_ = "Tobias Carryer"
from time import time
class _lowercase :
def __init__( self : int , snake_case : Tuple , snake_case : Optional[int] , snake_case : List[Any] , snake_case : int=int(time() ) ) -> Tuple: # noqa: B008
"""simple docstring"""
UpperCamelCase_ : str = multiplier
UpperCamelCase_ : List[Any] = increment
UpperCamelCase_ : Any = modulo
UpperCamelCase_ : Optional[Any] = seed
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
a_ = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31)
while True:
print(lcg.next_number())
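# The demo parameters are the classic Numerical Recipes LCG constants:
# multiplier 1664525, increment 1013904223, modulus 2 << 31 == 2**32.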
| 417
|
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
SCREAMING_SNAKE_CASE : str = getLogger(__name__)
def UpperCamelCase ( _a , _a , _a , _a = 8 , _a = 1_0_2_4 , _a="val" , _a=None , _a=False , _a="summarization" , _a=None , _a=1 , _a = None , _a="" , **_a , ) -> Dict:
'''simple docstring'''
lowercase_ :Union[str, Any] = str(_a )
assert local_rank is not None
torch.distributed.init_process_group(backend='''nccl''' , rank=_a )
lowercase_ :int = Path(_a )
lowercase_ :Optional[int] = save_dir.joinpath(f"rank_{local_rank}_output.json" )
torch.cuda.set_device(_a )
lowercase_ :int = AutoModelForSeqaSeqLM.from_pretrained(_a ).cuda()
if fpaa:
lowercase_ :Union[str, Any] = model.half()
# determine if we need to increase num_beams
use_task_specific_params(_a , _a ) # update config with task specific params
lowercase_ :Union[str, Any] = generate_kwargs.pop('''num_beams''' , model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
lowercase_ :Tuple = num_return_sequences
lowercase_ :Dict = AutoTokenizer.from_pretrained(_a )
logger.info(f"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
if max_source_length is None:
lowercase_ :List[str] = tokenizer.model_max_length
if prefix is None:
lowercase_ :Tuple = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
lowercase_ :str = SeqaSeqDataset(
_a , _a , _a , max_target_length=1_0_2_4 , type_path=_a , n_obs=_a , prefix=_a , **_a , )
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
lowercase_ :List[Any] = ds.make_sortish_sampler(_a , distributed=_a , add_extra_examples=_a , shuffle=_a )
lowercase_ :Optional[Any] = DataLoader(_a , sampler=_a , batch_size=_a , collate_fn=ds.collate_fn )
lowercase_ :List[Any] = []
for batch in tqdm(_a ):
lowercase_ :Tuple = model.generate(
input_ids=batch['''input_ids'''].to(model.device ) , attention_mask=batch['''attention_mask'''].to(model.device ) , num_return_sequences=_a , num_beams=_a , **_a , )
lowercase_ :Tuple = tokenizer.batch_decode(_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a )
lowercase_ :Dict = batch['''ids''']
if num_return_sequences > 1:
lowercase_ :int = chunks(_a , _a ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(_a ):
results.append({'''pred''': pred, '''id''': ids[i].item()} )
save_json(_a , _a )
return results, sampler.num_replicas
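# Each rank writes its own predictions to rank_<local_rank>_output.json; rank 0 later
# collects and merges those shards via gather_results_from_each_node() defined below.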
def UpperCamelCase ( ) -> Optional[Any]:
'''simple docstring'''
    parser = argparse.ArgumentParser(
        epilog='''Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate''' )
    parser.add_argument('''--data_dir''' , type=str , help='''like cnn_dm/test.source''' )
    parser.add_argument(
        '''--model_name''' , type=str , help='''like facebook/bart-large-cnn,t5-base, etc.''' , default='''sshleifer/distilbart-xsum-12-3''' , )
    parser.add_argument('''--save_dir''' , type=str , help='''where to save''' , default='''tmp_gen''' )
    parser.add_argument('''--max_source_length''' , type=int , default=None )
    parser.add_argument(
        '''--type_path''' , type=str , default='''test''' , help='''which subset to evaluate, typically train/val/test''' )
    parser.add_argument('''--task''' , type=str , default='''summarization''' , help='''used for task_specific_params + metrics''' )
    parser.add_argument('''--bs''' , type=int , default=8 , required=False , help='''batch size''' )
    parser.add_argument(
        '''--local_rank''' , type=int , default=-1 , required=False , help='''should be passed by distributed.launch''' )
    parser.add_argument(
        '''--n_obs''' , type=int , default=None , required=False , help='''How many observations. Defaults to all.''' )
    parser.add_argument(
        '''--num_return_sequences''' , type=int , default=1 , required=False , help='''How many sequences to return''' )
    parser.add_argument(
        '''--sync_timeout''' , type=int , default=6_0_0 , required=False , help='''How long should master process wait for other processes to finish.''' , )
    parser.add_argument('''--src_lang''' , type=str , default=None , required=False )
    parser.add_argument('''--tgt_lang''' , type=str , default=None , required=False )
    parser.add_argument(
        '''--prefix''' , type=str , required=False , default=None , help='''will be added to the beginning of src examples''' )
    parser.add_argument('''--fp16''' , action='''store_true''' )
    parser.add_argument('''--debug''' , action='''store_true''' )
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest )
    if args.local_rank <= 0 and generate_kwargs:
        print(f"parsed the following generate kwargs: {generate_kwargs}" )
    json_save_dir = Path(args.save_dir + '''_tmp''' )
    Path(json_save_dir ).mkdir(exist_ok=True ) # this handles locking.
    intermediate_files = list(json_save_dir.glob('''rank_*.json''' ) )
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them." )
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang
    Path(args.save_dir ).mkdir(exist_ok=True )
    results, num_replicas = eval_data_dir(
        args.data_dir , json_save_dir , args.model_name , type_path=args.type_path , bs=args.bs , fp16=args.fp16 , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=dataset_kwargs , **generate_kwargs , )
    if args.local_rank <= 0:
        save_dir = Path(args.save_dir )
        save_dir.mkdir(exist_ok=True )
        partial_results = gather_results_from_each_node(num_replicas , json_save_dir , args.sync_timeout )
        preds = combine_partial_results(partial_results )
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath('''pseudolabel_results.json''' )
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/" )
            save_json(preds , save_path )
            return
        tgt_file = Path(args.data_dir ).joinpath(args.type_path + '''.target''' )
        with open(tgt_file ) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds )]
        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = '''translation''' in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = '''bleu''' if calc_bleu else '''rouge'''
        metrics = score_fn(preds , labels )
        metrics['''n_obs'''] = len(preds )
        runtime = time.time() - start_time
        metrics['''seconds_per_sample'''] = round(runtime / metrics['''n_obs'''] , 4 )
        metrics['''n_gpus'''] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json" )
        save_json(metrics , metrics_save_path , indent=None )
        print(metrics )
        write_txt_file(preds , save_dir.joinpath(f"{args.type_path}_generations.txt" ) )
        if args.debug:
            write_txt_file(labels , save_dir.joinpath(f"{args.type_path}.target" ) )
        else:
            shutil.rmtree(json_save_dir )
def combine_partial_results(partial_results ) -> List:
    '''simple docstring'''
    records = []
    for partial_result in partial_results:
        records.extend(partial_result )
    records = sorted(records , key=lambda x: x["id"] )
    preds = [x['''pred'''] for x in records]
    return preds
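# Illustrative (hypothetical data): combine_partial_results([[{"pred": "b", "id": 1}], [{"pred": "a", "id": 0}]])
# merges the per-rank record lists, sorts them by id, and returns ["a", "b"].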
def gather_results_from_each_node(num_replicas , save_dir , timeout ) -> List[Dict[str, List]]:
    '''simple docstring'''
    start_wait = time.time()
    logger.info('''waiting for all nodes to finish''' )
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob('''rank_*.json''' ) )
        if len(json_files ) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json , json_files )
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError('''Rank 0 gave up on waiting for other processes''' )
# Unreachable
if __name__ == "__main__":
# Usage for MT:
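    # Illustrative invocation (paths and model name below are placeholders, not from the original script):
    # python -m torch.distributed.launch --nproc_per_node=8 run_distributed_eval.py \
    #     --model_name Helsinki-NLP/opus-mt-en-ro --data_dir wmt_en_ro --save_dir tmp_gen --task translation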
run_generate()
| 257
| 0
|
'''simple docstring'''
print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))
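# How the quine works: %r substitutes the template string into itself in repr form, so the
# printed text reproduces the exact source line; %% escapes to a single literal % on output.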
| 593
|
'''simple docstring'''
from collections import defaultdict
def dfs(start ) -> int:
    '''simple docstring'''
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v )
    if ret % 2 == 0:
        cuts.append(start )
    return ret
def even_tree() -> None:
    '''simple docstring'''
    dfs(1 )
if __name__ == "__main__":
    n , m = 10, 9
    tree: defaultdict = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count: int = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
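    # Worked example for the tree above: the subtrees rooted at 3 (size 2) and 6 (size 4) have
    # even size, and the full tree (size 10) is even as well, so cuts == [3, 6, 1] and the
    # script prints len(cuts) - 1 == 2 removable edges.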
| 593
| 1
|
'''simple docstring'''
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / """src"""
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
__lowerCAmelCase = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""}
__lowerCAmelCase = """zero2"""
__lowerCAmelCase = """zero3"""
__lowerCAmelCase = [ZEROa, ZEROa]
def custom_name_func(func , param_num , param ):
    # customize the test name so both parameterized args appear in the sub-test name
    param_based_name = parameterized.to_safe_name("""_""".join(str(x ) for x in param.args ) )
    return f"""{func.__name__}_{param_based_name}"""
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus ):
'''simple docstring'''
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp32_non_distributed(self , stage , model ) -> None:
        # NOTE: the distributed/fp16 flag values in these four tests are reconstructed
        # assumptions; the original literals were lost in obfuscation
        self.run_and_check(
            stage=stage , model=model , distributed=False , fp16=False , )
    @require_torch_multi_gpu
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp32_distributed(self , stage , model ) -> None:
        self.run_and_check(
            stage=stage , model=model , distributed=True , fp16=False , )
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp16_non_distributed(self , stage , model ) -> None:
        self.run_and_check(
            stage=stage , model=model , distributed=False , fp16=True , )
    @require_torch_multi_gpu
    @parameterized.expand(params , name_func=custom_name_func )
    def test_fp16_distributed(self , stage , model ) -> None:
        self.run_and_check(
            stage=stage , model=model , distributed=True , fp16=True , )
    def do_checks(self , output_dir ) -> None:
        # run_asr.py doesn't produce results we can assert on here, so for now we only
        # verify that the training subprocess completed without failing
        pass
    def run_and_check(self , stage , model , eval_steps = 10 , distributed = True , fp16 = True , quality_checks = True , ) -> str:
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage , model_name=model_name , eval_steps=eval_steps , num_train_epochs=1 , distributed=distributed , fp16=fp16 , )
        self.do_checks(output_dir )
        return output_dir
    def run_trainer(self , stage , model_name , eval_steps = 10 , num_train_epochs = 1 , distributed = True , fp16 = True , ) -> str:
        output_dir = self.get_auto_remove_tmp_dir("""./xxx""" , after=False )
        args = f"""\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(num_train_epochs )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n """.split()
        if fp16:
            args.extend(["""--fp16"""] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
_snake_case = f"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
_snake_case = [f"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
_snake_case = self.get_launcher(_lowerCAmelCase )
_snake_case = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd , env=self.get_env() )
return output_dir
    def get_launcher(self , distributed=False ):
        num_gpus = min(2 , get_gpu_count() ) if distributed else 1
        return f"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
| 585
|
from math import factorial
def combinations(n , k ):
    '''simple docstring'''
    if n < k or k < 0:
        raise ValueError("""Please enter positive integers for n and k where n >= k""" )
    return factorial(n ) // (factorial(k ) * factorial(n - k ))
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
F'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
"""If a class of 40 students must be arranged into groups of""",
F'''4 for group projects, there are {combinations(40, 4)} ways''',
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
F'''are {combinations(10, 3)} ways that first, second and''',
"""third place can be awarded.""",
)
| 80
| 0
|
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin ):
    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"
    def __init__( self , image_processor ):
        super().__init__(image_processor )
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]
    def __call__( self , images=None , input_points=None , input_labels=None , input_boxes=None , return_tensors = None , **kwargs , ):
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , **kwargs , )
        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]
        if hasattr(original_sizes , "numpy" ): # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()
        input_points , input_labels , input_boxes = self._check_and_preprocess_points(
            input_points=input_points , input_labels=input_labels , input_boxes=input_boxes , )
        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor , original_sizes , input_points=input_points , input_labels=input_labels , input_boxes=input_boxes , return_tensors=return_tensors , )
        return encoding_image_processor
def _lowerCamelCase ( self , a_ , a_ , a_=None , a_=None , a_=None , a_="pt" , ):
if input_points is not None:
if len(a_ ) != len(a_ ):
lowerCAmelCase : Optional[int] = [
self._normalize_coordinates(self.target_size , a_ , original_sizes[0] ) for point in input_points
]
else:
lowerCAmelCase : Dict = [
self._normalize_coordinates(self.target_size , a_ , a_ )
for point, original_size in zip(a_ , a_ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
lowerCAmelCase , lowerCAmelCase : Tuple = self._pad_points_and_labels(a_ , a_ )
lowerCAmelCase : int = np.array(a_ )
if input_labels is not None:
lowerCAmelCase : str = np.array(a_ )
if input_boxes is not None:
if len(a_ ) != len(a_ ):
lowerCAmelCase : str = [
self._normalize_coordinates(self.target_size , a_ , original_sizes[0] , is_bounding_box=a_ )
for box in input_boxes
]
else:
lowerCAmelCase : List[Any] = [
self._normalize_coordinates(self.target_size , a_ , a_ , is_bounding_box=a_ )
for box, original_size in zip(a_ , a_ )
]
lowerCAmelCase : str = np.array(a_ )
if input_boxes is not None:
if return_tensors == "pt":
lowerCAmelCase : Tuple = torch.from_numpy(a_ )
# boxes batch size of 1 by default
lowerCAmelCase : Tuple = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
lowerCAmelCase : Any = tf.convert_to_tensor(a_ )
# boxes batch size of 1 by default
lowerCAmelCase : int = tf.expand_dims(a_ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"input_boxes": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
lowerCAmelCase : Dict = torch.from_numpy(a_ )
# point batch size of 1 by default
lowerCAmelCase : Any = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
lowerCAmelCase : Optional[Any] = tf.convert_to_tensor(a_ )
# point batch size of 1 by default
lowerCAmelCase : int = tf.expand_dims(a_ , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"input_points": input_points} )
if input_labels is not None:
if return_tensors == "pt":
lowerCAmelCase : Dict = torch.from_numpy(a_ )
# point batch size of 1 by default
lowerCAmelCase : Any = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
lowerCAmelCase : List[str] = tf.convert_to_tensor(a_ )
# point batch size of 1 by default
lowerCAmelCase : Union[str, Any] = tf.expand_dims(a_ , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"input_labels": input_labels} )
return encoding_image_processor
    def _pad_points_and_labels(self , input_points , input_labels ):
        expected_nb_points = max([point.shape[0] for point in input_points] )
        processed_input_points = []
        for i, point in enumerate(input_points ):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
                input_labels[i] = np.append(input_labels[i] , [self.point_pad_value] )
            processed_input_points.append(point )
        input_points = processed_input_points
        return input_points, input_labels
    def _normalize_coordinates(self , target_size , coords , original_size , is_bounding_box=False ):
        old_h , old_w = original_size
        new_h , new_w = self.image_processor._get_preprocess_shape(original_size , longest_edge=target_size )
        coords = deepcopy(coords ).astype(float )
        if is_bounding_box:
            coords = coords.reshape(-1 , 2 , 2 )
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            coords = coords.reshape(-1 , 4 )
        return coords
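    # Illustrative (hypothetical sizes): with original_size == (600, 900) and target_size == 1024,
    # _get_preprocess_shape returns (683, 1024), so a point (450, 300) rescales to roughly (512, 341.5):
    # x is scaled by 1024 / 900 and y by 683 / 600.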
    def _check_and_preprocess_points(self , input_points=None , input_labels=None , input_boxes=None , ):
        if input_points is not None:
            if hasattr(input_points , "numpy" ): # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()
            if not isinstance(input_points , list ) or not isinstance(input_points[0] , list ):
                raise ValueError("Input points must be a list of list of floating points." )
            input_points = [np.array(input_point ) for input_point in input_points]
        else:
            input_points = None
        if input_labels is not None:
            if hasattr(input_labels , "numpy" ):
                input_labels = input_labels.numpy().tolist()
            if not isinstance(input_labels , list ) or not isinstance(input_labels[0] , list ):
                raise ValueError("Input labels must be a list of list integers." )
            input_labels = [np.array(label ) for label in input_labels]
        else:
            input_labels = None
        if input_boxes is not None:
            if hasattr(input_boxes , "numpy" ):
                input_boxes = input_boxes.numpy().tolist()
            if (
                not isinstance(input_boxes , list )
                or not isinstance(input_boxes[0] , list )
                or not isinstance(input_boxes[0][0] , list )
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points." )
            input_boxes = [np.array(box ).astype(np.float32 ) for box in input_boxes]
        else:
            input_boxes = None
        return input_points, input_labels, input_boxes
@property
    def model_input_names(self ):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names ) )
    def post_process_masks(self , *args , **kwargs ):
        return self.image_processor.post_process_masks(*args , **kwargs )
| 551
|
'''simple docstring'''
import numpy as np
def tangent_hyperbolic(vector: np.ndarray ) -> np.ndarray:
    return (2 / (1 + np.exp(-2 * vector ))) - 1
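# Illustrative values: tangent_hyperbolic(np.array([1, 5, 6, -0.67])) is approximately
# array([0.76159, 0.99991, 0.99999, -0.58497]), matching np.tanh on the same input.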
if __name__ == "__main__":
import doctest
doctest.testmod()
| 551
| 1
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
),
}
class LongformerConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = "longformer"
    def __init__( self , attention_window: Union[List[int], int] = 512 , sep_token_id: int = 2 , pad_token_id: int = 1 , bos_token_id: int = 0 , eos_token_id: int = 2 , vocab_size: int = 30_522 , hidden_size: int = 768 , num_hidden_layers: int = 12 , num_attention_heads: int = 12 , intermediate_size: int = 3_072 , hidden_act: str = "gelu" , hidden_dropout_prob: float = 0.1 , attention_probs_dropout_prob: float = 0.1 , max_position_embeddings: int = 512 , type_vocab_size: int = 2 , initializer_range: float = 0.02 , layer_norm_eps: float = 1E-12 , onnx_export: bool = False , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig ):
    '''simple docstring'''
def __init__( self : Optional[Any] , _lowerCAmelCase : "PretrainedConfig" , _lowerCAmelCase : str = "default" , _lowerCAmelCase : "List[PatchingSpec]" = None ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = True
@property
    def inputs(self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
] )
@property
    def outputs(self ):
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: 'batch'}
        return outputs
@property
    def atol_for_validation(self ):
        return 1E-4
@property
    def default_onnx_opset(self ):
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : "PreTrainedTokenizerBase" , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[TensorType] = None , ):
SCREAMING_SNAKE_CASE_ = super().generate_dummy_inputs(
preprocessor=_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
SCREAMING_SNAKE_CASE_ = torch.zeros_like(inputs['input_ids'] )
# make every second token global
SCREAMING_SNAKE_CASE_ = 1
return inputs
| 31
|
'''simple docstring'''
import math
def is_prime(number ):
    '''simple docstring'''
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
    odd_numbers = range(3 , int(math.sqrt(number ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def next_prime(value , factor=1 , **kwargs ):
    '''simple docstring'''
    value = factor * value
    first_value_val = value
    while not is_prime(value ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1 , **kwargs )
return value
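# Illustrative: next_prime(14) == 17, next_prime(14, factor=2) == 29 (search starts at 28),
# and next_prime(14, desc=True) counts downward to 13.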
| 672
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : str = {
'''tiiuae/falcon-40b''': '''https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json''',
'''tiiuae/falcon-7b''': '''https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json''',
}
class FalconConfig(PretrainedConfig ):
    model_type = """falcon"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    def __init__( self , vocab_size=65024 , hidden_size=4544 , num_hidden_layers=32 , num_attention_heads=71 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , hidden_dropout=0.0 , attention_dropout=0.0 , num_kv_heads=None , alibi=False , new_decoder_architecture=False , multi_query=True , parallel_attn=True , bias=False , bos_token_id=11 , eos_token_id=11 , **kwargs , ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('''n_embed''' , None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
@property
    def head_dim(self ):
        return self.hidden_size // self.num_attention_heads
@property
    def rotary(self ):
        return not self.alibi
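# Illustrative: with the defaults above, head_dim == 4544 // 71 == 64, and rotary is True
# whenever alibi is left at its default of False.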
| 715
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester ):
    def create_and_test_config_common_properties(self ):
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , '''embed_dim''' ) )
        self.parent.assertTrue(hasattr(config , '''num_heads''' ) )
class TFCvtModelTester:
    def __init__( self , parent , batch_size=13 , image_size=64 , num_channels=3 , embed_dim=[16, 48, 96] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , stride_kv=[2, 2, 2] , cls_token=[False, False, True] , attention_drop_rate=[0.0, 0.0, 0.0] , initializer_range=0.02 , layer_norm_eps=1E-12 , is_training=True , use_labels=True , num_labels=2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self ):
        return CvtConfig(
            image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
    def create_and_check_model(self , config , pixel_values , labels ):
        model = TFCvtModel(config=config )
        result = model(pixel_values , training=False )
        image_size = (self.image_size, self.image_size)
        height , width = image_size[0], image_size[1]
        for i in range(len(self.depth ) ):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
    def create_and_check_for_image_classification(self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config )
        result = model(pixel_values , labels=labels , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False
    def setUp(self ):
        self.model_tester = TFCvtModelTester(self )
        self.config_tester = TFCvtConfigTester(self , config_class=CvtConfig , has_text_modality=False , hidden_size=37 )
    def test_config(self ):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    @unittest.skip(reason='''Cvt does not output attentions''' )
    def test_attention_outputs(self ):
        pass
    @unittest.skip(reason='''Cvt does not use inputs_embeds''' )
    def test_inputs_embeds(self ):
        pass
    @unittest.skip(reason='''Cvt does not support input and output embeddings''' )
    def test_model_common_attributes(self ):
        pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
    def test_dataset_conversion(self ):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
@slow
    def test_keras_fit(self ):
super().test_keras_fit()
    @unittest.skip(reason='''Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8''' )
    def test_keras_fit_mixed_precision(self ):
        policy = tf.keras.mixed_precision.Policy('''mixed_float16''' )
        tf.keras.mixed_precision.set_global_policy(policy )
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy('''float32''' )
    def test_forward_signature(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_hidden_states_output(self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth )
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) , [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained(self ):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor(self ):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
    @slow
    def test_inference_image_classification_head(self ):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''tf''' )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([0.92_85, 0.90_15, -0.31_50] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , expected_slice , atol=1E-4 ) )
| 485
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Tuple = logging.get_logger(__name__)
UpperCAmelCase_ : Union[str, Any] = {
'RWKV/rwkv-4-169m-pile': 'https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json',
'RWKV/rwkv-4-430m-pile': 'https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json',
'RWKV/rwkv-4-1b5-pile': 'https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json',
'RWKV/rwkv-4-3b-pile': 'https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json',
'RWKV/rwkv-4-7b-pile': 'https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json',
'RWKV/rwkv-4-14b-pile': 'https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json',
'RWKV/rwkv-raven-1b5': 'https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json',
'RWKV/rwkv-raven-3b': 'https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json',
'RWKV/rwkv-raven-7b': 'https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json',
'RWKV/rwkv-raven-14b': 'https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json',
}
class RwkvConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = """rwkv"""
    attribute_map = {"""max_position_embeddings""": """context_length"""}
    def __init__( self , vocab_size=5_0277 , context_length=1024 , hidden_size=4096 , num_hidden_layers=32 , attention_hidden_size=None , intermediate_size=None , layer_norm_epsilon=1e-5 , bos_token_id=0 , eos_token_id=0 , rescale_every=6 , tie_word_embeddings=False , use_cache=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
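# Illustrative: RwkvConfig() defaults attention_hidden_size to hidden_size (4096)
# and intermediate_size to 4 * hidden_size (16384).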
| 533
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ : Dict = {
'configuration_mobilebert': [
'MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileBertConfig',
'MobileBertOnnxConfig',
],
'tokenization_mobilebert': ['MobileBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = ['MobileBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = [
'MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileBertForMaskedLM',
'MobileBertForMultipleChoice',
'MobileBertForNextSentencePrediction',
'MobileBertForPreTraining',
'MobileBertForQuestionAnswering',
'MobileBertForSequenceClassification',
'MobileBertForTokenClassification',
'MobileBertLayer',
'MobileBertModel',
'MobileBertPreTrainedModel',
'load_tf_weights_in_mobilebert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
'TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileBertForMaskedLM',
'TFMobileBertForMultipleChoice',
'TFMobileBertForNextSentencePrediction',
'TFMobileBertForPreTraining',
'TFMobileBertForQuestionAnswering',
'TFMobileBertForSequenceClassification',
'TFMobileBertForTokenClassification',
'TFMobileBertMainLayer',
'TFMobileBertModel',
'TFMobileBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 533
| 1
|
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self ):
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self ):
        """simple docstring"""
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab(self ):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(len(vocab_keys ) , 1008 )
    def test_vocab_size(self ):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
    def test_full_tokenizer(self ):
        """simple docstring"""
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    @cached_property
    def big_tokenizer(self ):
        """simple docstring"""
        return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
    def test_picklable_without_disk(self ):
        """simple docstring"""
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer = XGLMTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
            pickle.loads(pickled_tokenizer )
    def test_rust_and_python_full_tokenizers(self ):
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    @slow
    def test_tokenization_base_easy_symbols(self ):
        """simple docstring"""
        symbols = 'Hello World!'
        original_tokenizer_encodings = [2, 3_1227, 4447, 35]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @slow
    def test_tokenization_base_hard_symbols(self ):
        """simple docstring"""
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @slow
    def test_tokenizer_integration(self ):
        """simple docstring"""
        # fmt: off
        expected_encoding = {
'input_ids': [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="""facebook/xglm-564M""" , padding=False , )
| 704
|
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self ):
"""simple docstring"""
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def get_pipeline_config(self ):
        """simple docstring"""
        config = self.get_config()
        config.vocab_size = 300
        return config
    def check_loss_output(self , result ):
        """simple docstring"""
        self.parent.assertListEqual(list(result.loss.size() ) , [] )
    def create_and_check_deberta_model(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = DebertaModel(config=config )
        model.to(torch_device )
        model.eval()
        sequence_output = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )[0]
        sequence_output = model(input_ids , token_type_ids=token_type_ids )[0]
        sequence_output = model(input_ids )[0]
        self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
    def create_and_check_deberta_for_masked_lm(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = DebertaForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_deberta_for_sequence_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
        self.check_loss_output(result )
    def create_and_check_deberta_for_token_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_deberta_for_question_answering(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = DebertaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': DebertaModel,
'fill-mask': DebertaForMaskedLM,
'question-answering': DebertaForQuestionAnswering,
'text-classification': DebertaForSequenceClassification,
'token-classification': DebertaForTokenClassification,
'zero-shot': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self ):
        """simple docstring"""
        self.model_tester = DebertaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DebertaConfig , hidden_size=37 )
    def test_config(self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_deberta_model(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs )
    def test_for_sequence_classification(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs )
    def test_for_masked_lm(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs )
    def test_for_question_answering(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs )
    def test_for_token_classification(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained(self ):
        """simple docstring"""
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
    def test_inference_masked_lm(self ):
"""simple docstring"""
pass
    @slow
    def test_inference_no_head(self ):
        """simple docstring"""
        model = DebertaModel.from_pretrained("""microsoft/deberta-base""" )
        input_ids = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5_9_8_6, -0.8_0_5_5, -0.8_4_6_2], [1.4_4_8_4, -0.9_3_4_8, -0.8_0_5_9], [0.3_1_2_3, 0.0_0_3_2, -1.4_1_3_1]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) , f'{output[:, 1:4, 1:4]}' )
| 197
| 0
|
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}
def sum_of_digit_factorial(n: int) -> int:
    '''Return the sum of the factorials of the digits of n.'''
    return sum(DIGIT_FACTORIAL[d] for d in str(n))
def solution() -> int:
    '''Sum all numbers that equal the sum of the factorials of their own digits.'''
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
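# A quick hand check of the idea above (illustrative only): 145 qualifies
# because 1! + 4! + 5! = 1 + 24 + 120 = 145; the loop starts at 3 so that the
# trivial one-term "sums" 1 and 2 are excluded, and 7 * 9! + 1 is a safe upper
# bound because even an 8-digit number's digit-factorial sum, 8 * 9!, has
# fewer than 8 digits.
assert sum_of_digit_factorial(145) == 145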
if __name__ == "__main__":
print(F'{solution() = }')
| 38
|
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
        'google/bigbird-roberta-large': (
            'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
        ),
        'google/bigbird-base-trivia-itc': (
            'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/bigbird-roberta-base': 4096,
    'google/bigbird-roberta-large': 4096,
    'google/bigbird-base-trivia-itc': 4096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens = []
def __init__( self , __UpperCAmelCase , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[MASK]" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase = None , **__UpperCAmelCase , ):
SCREAMING_SNAKE_CASE_ : Optional[Any] =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else bos_token
SCREAMING_SNAKE_CASE_ : Tuple =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else eos_token
SCREAMING_SNAKE_CASE_ : List[Any] =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else unk_token
SCREAMING_SNAKE_CASE_ : Union[str, Any] =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else pad_token
SCREAMING_SNAKE_CASE_ : Optional[Any] =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else cls_token
SCREAMING_SNAKE_CASE_ : int =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE_ : Dict =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
SCREAMING_SNAKE_CASE_ : Optional[int] ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =vocab_file
SCREAMING_SNAKE_CASE_ : Tuple =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)
    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def _decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=None, spaces_between_special_tokens=True, **kwargs, ):
        self._decode_use_source_tokenizer = kwargs.pop('use_source_tokenizer', False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPE
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r' (\[(MASK|SEP)\])', r'\1', ' '.join(sub_texts))
        else:
            text = ''.join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
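# Minimal usage sketch for the SentencePiece tokenizer above (illustrative
# only; it assumes a local `spiece.model` file exists at that path):
#
#   tok = BigBirdTokenizer('spiece.model')
#   ids = tok.build_inputs_with_special_tokens([10, 11, 12])  # [CLS] 10 11 12 [SEP]
#   tok.get_special_tokens_mask([10, 11, 12])                 # -> [1, 0, 0, 0, 1]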
| 220
| 0
|
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude, angle, radian_mode=False):
    '''Convert a polar force (magnitude, angle) into [F_x, F_y] components.'''
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]
def in_static_equilibrium(forces, location, eps=10**-1):
    '''Check rotational equilibrium: the net moment of the forces must vanish.'''
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
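# Background note (added commentary): for 2D vectors numpy's cross() reduces
# to the scalar r_x * F_y - r_y * F_x, i.e. the moment of each force about
# the origin, so in_static_equilibrium() is the discrete form of
# "sum of moments = 0". Tiny illustrative check:
#   cross(array([[1, 0], [1, 0]]), array([[0, -10.0], [0, 10.0]]))
#   -> [-10.0, 10.0], which sums to 0, hence equilibrium.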
if __name__ == "__main__":
# Test to check if it works
lowerCAmelCase : List[str] =array(
[
polar_force(718.4, 180 - 30),
polar_force(879.54, 45),
polar_force(100, -90),
]
)
lowerCAmelCase : NDArray[floataa] =array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
lowerCAmelCase : Any =array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
lowerCAmelCase : int =array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
lowerCAmelCase : List[str] =array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
lowerCAmelCase : Any =array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 15
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
_snake_case = ViTImageProcessor if is_vision_available() else None
@property
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def setUp(self):
        """simple docstring"""
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_image_processor(self, **kwargs):
        """simple docstring"""
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """simple docstring"""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
_lowerCamelCase : List[str] = self.get_tokenizer()
_lowerCamelCase : Tuple = self.get_image_processor()
_lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
processor.save_pretrained(self.tmpdirname)
_lowerCamelCase : int = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCamelCase)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
"""simple docstring"""
_lowerCamelCase : Dict = self.get_tokenizer()
_lowerCamelCase : Optional[Any] = self.get_image_processor()
_lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
processor.save_pretrained(self.tmpdirname)
_lowerCamelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""")
_lowerCamelCase : Union[str, Any] = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0)
_lowerCamelCase : Tuple = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_UpperCamelCase , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Any) ->int:
"""simple docstring"""
_lowerCamelCase : int = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : List[str] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : List[str] = self.prepare_image_inputs()
_lowerCamelCase : Optional[int] = image_processor(_UpperCamelCase , return_tensors="""np""")
_lowerCamelCase : int = processor(images=_UpperCamelCase , return_tensors="""np""")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : List[Any] = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Optional[int] = """test"""
_lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase)
_lowerCamelCase : Dict = tokenizer(_UpperCamelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.get_image_processor()
_lowerCamelCase : List[Any] = self.get_tokenizer()
_lowerCamelCase : Any = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Any = """test"""
_lowerCamelCase : List[str] = self.prepare_image_inputs()
_lowerCamelCase : int = processor(text=_UpperCamelCase , images=_UpperCamelCase)
self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""])
# test if it raises when no input is passed
with pytest.raises(_UpperCamelCase):
processor()
def _SCREAMING_SNAKE_CASE ( self : Any) ->str:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.get_image_processor()
_lowerCamelCase : List[str] = self.get_tokenizer()
_lowerCamelCase : Dict = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_lowerCamelCase : Any = processor.char_decode(_UpperCamelCase)
_lowerCamelCase : Tuple = tokenizer.batch_decode(_UpperCamelCase)
_lowerCamelCase : List[str] = [seq.replace(""" """ , """""") for seq in decoded_tok]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
"""simple docstring"""
_lowerCamelCase : Dict = self.get_image_processor()
_lowerCamelCase : str = self.get_tokenizer()
_lowerCamelCase : List[Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : int = None
_lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()
_lowerCamelCase : Union[str, Any] = processor(text=_UpperCamelCase , images=_UpperCamelCase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[str] = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase)
_lowerCamelCase : Any = torch.randn(1 , 27 , 38)
_lowerCamelCase : List[Any] = torch.randn(1 , 27 , 5_0257)
_lowerCamelCase : List[str] = torch.randn(1 , 27 , 3_0522)
_lowerCamelCase : int = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
| 15
| 1
|
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        '''simple docstring'''
        return BioGptConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        '''simple docstring'''
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_biogpt_model_attention_mask_past(self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args):
        '''simple docstring'''
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0
        # first forward pass
        output, past_key_values = model(input_ids, attention_mask=attn_mask).to_tuple()
        # create hypothetical next token and extend next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, )
        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)['last_hidden_state']
        output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)['last_hidden_state']
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_biogpt_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args):
        '''simple docstring'''
        model = BioGptModel(config=config).to(torch_device).eval()
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next tokens and extend next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)
        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)['last_hidden_state']
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            'last_hidden_state'
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_forward_and_backwards(self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args, gradient_checkpointing=False):
        '''simple docstring'''
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()
        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()
    def create_and_check_biogpt_weight_initialization(self, config, *args):
        '''simple docstring'''
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)
    def create_and_check_biogpt_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, *args):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': BioGptModel,
            'text-classification': BioGptForSequenceClassification,
            'text-generation': BioGptForCausalLM,
            'token-classification': BioGptForTokenClassification,
            'zero-shot': BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*__a )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(__a )
__a : List[str] = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
__a : Dict = 'left'
# Define PAD Token = EOS Token = 50256
__a : Tuple = tokenizer.eos_token
__a : Tuple = model.config.eos_token_id
# use different length sentences to test batching
__a : Tuple = [
'Hello, my dog is a little',
'Today, I',
]
__a : int = tokenizer(__a , return_tensors='pt' , padding=__a )
__a : int = inputs['input_ids'].to(__a )
__a : List[str] = model.generate(
input_ids=__a , attention_mask=inputs['attention_mask'].to(__a ) , )
__a : Union[str, Any] = tokenizer(sentences[0] , return_tensors='pt' ).input_ids.to(__a )
__a : str = model.generate(input_ids=__a )
__a : List[Any] = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
__a : Any = tokenizer(sentences[1] , return_tensors='pt' ).input_ids.to(__a )
__a : Union[str, Any] = model.generate(input_ids=__a , max_length=model.config.max_length - num_paddings )
__a : str = tokenizer.batch_decode(__a , skip_special_tokens=__a )
__a : List[str] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__a )
__a : Optional[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=__a )
__a : List[str] = [
'Hello, my dog is a little bit bigger than a little bit.',
'Today, I have a good idea of how to use the information',
]
self.assertListEqual(__a , __a )
self.assertListEqual(__a , [non_padded_sentence, padded_sentence] )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def __UpperCAmelCase ( self ):
'''simple docstring'''
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
def __UpperCAmelCase ( self ):
'''simple docstring'''
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
        model = BioGptForCausalLM.from_pretrained('microsoft/biogpt')
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]
        vocab_size = 42384
        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
        tokenizer = BioGptTokenizer.from_pretrained('microsoft/biogpt')
        model = BioGptForCausalLM.from_pretrained('microsoft/biogpt')
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer('COVID-19 is', return_tensors='pt').to(torch_device)
        output_ids = model.generate(
            **tokenized, min_length=100, max_length=1024, num_beams=5, early_stopping=True, )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        expected_output_str = (
'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
' more than 800,000 deaths.'
)
        self.assertEqual(output_str, expected_output_str)
| 476
|
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition_rows = df.where(F'''SPARK_PARTITION_ID() = {part_id}''').collect()
        for row_idx, row in enumerate(partition_rows):
            expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)
    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == F'''0_{i}'''
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("""numpy.random.Generator""") as generator_mock:
        generator_mock.shuffle = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 263
| 0
|
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
    def _get_uniform_logits(self, batch_size: int, length: int):
        '''simple docstring'''
        scores = jnp.ones((batch_size, length)) / length
return scores
def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Any = None
lowerCAmelCase_ : Any = 20
lowerCAmelCase_ : Optional[Any] = self._get_uniform_logits(batch_size=2 ,length=lowerCAmelCase__ )
# tweak scores to not be uniform anymore
lowerCAmelCase_ : List[str] = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCAmelCase_ : Union[str, Any] = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCAmelCase_ : Optional[Any] = jax.nn.softmax(lowerCAmelCase__ ,axis=-1 )
lowerCAmelCase_ : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCAmelCase_ : str = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCAmelCase_ : int = jax.nn.softmax(temp_dist_warper_sharper(lowerCAmelCase__ ,scores.copy() ,cur_len=lowerCAmelCase__ ) ,axis=-1 )
lowerCAmelCase_ : Optional[Any] = jax.nn.softmax(temp_dist_warper_smoother(lowerCAmelCase__ ,scores.copy() ,cur_len=lowerCAmelCase__ ) ,axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() )
def UpperCAmelCase_ ( self : List[str] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = None
lowerCAmelCase_ : Dict = 10
lowerCAmelCase_ : Tuple = 2
# create ramp distribution
lowerCAmelCase_ : List[Any] = np.broadcast_to(np.arange(lowerCAmelCase__ )[None, :] ,(batch_size, vocab_size) ).copy()
lowerCAmelCase_ : int = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCAmelCase_ : str = FlaxTopKLogitsWarper(3 )
lowerCAmelCase_ : Any = top_k_warp(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCAmelCase_ : List[Any] = 5
lowerCAmelCase_ : Dict = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 )
lowerCAmelCase_ : List[Any] = np.broadcast_to(np.arange(lowerCAmelCase__ )[None, :] ,(batch_size, length) ).copy()
lowerCAmelCase_ : List[Any] = top_k_warp_safety_check(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : List[str] = None
lowerCAmelCase_ : List[str] = 10
lowerCAmelCase_ : Optional[int] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCAmelCase_ : Optional[Any] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
lowerCAmelCase_ : Any = FlaxTopPLogitsWarper(0.8 )
lowerCAmelCase_ : Union[str, Any] = np.exp(top_p_warp(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCAmelCase_ : Optional[Any] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(lowerCAmelCase__ ,lowerCAmelCase__ ,atol=1e-3 ) )
# check edge cases with negative and extreme logits
lowerCAmelCase_ : Optional[Any] = np.broadcast_to(np.arange(lowerCAmelCase__ )[None, :] ,(batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCAmelCase_ : Optional[Any] = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
lowerCAmelCase_ : Tuple = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 )
lowerCAmelCase_ : Any = top_p_warp(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] )
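    # Added note (commentary, not part of the original test): with top_p=0.8
    # the warper keeps the smallest set of tokens whose sorted probabilities
    # reach 0.8. Row one [0.3, 0.1, 0.1, 0.5] keeps 0.5 + 0.3 = 0.8 (two
    # tokens); row two [0.15, 0.3, 0.3, 0.25] needs 0.3 + 0.3 + 0.25 = 0.85
    # (three tokens) -- exactly the expected outputs asserted above.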
def UpperCAmelCase_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = 20
lowerCAmelCase_ : List[str] = 4
lowerCAmelCase_ : Dict = 0
lowerCAmelCase_ : List[Any] = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=lowerCAmelCase__ )
# check that min length is applied at length 5
lowerCAmelCase_ : Any = ids_tensor((batch_size, 20) ,vocab_size=20 )
lowerCAmelCase_ : Dict = 5
lowerCAmelCase_ : Any = self._get_uniform_logits(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : int = min_dist_processor(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float("inf" )] )
# check that min length is not applied anymore at length 15
lowerCAmelCase_ : Union[str, Any] = self._get_uniform_logits(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Any = 15
lowerCAmelCase_ : Optional[int] = min_dist_processor(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
self.assertFalse(jnp.isinf(lowerCAmelCase__ ).any() )
def UpperCAmelCase_ ( self : int ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : int = 20
lowerCAmelCase_ : Any = 4
lowerCAmelCase_ : Dict = 0
lowerCAmelCase_ : str = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__ )
# check that all scores are -inf except the bos_token_id score
lowerCAmelCase_ : Any = ids_tensor((batch_size, 1) ,vocab_size=20 )
lowerCAmelCase_ : Any = 1
lowerCAmelCase_ : int = self._get_uniform_logits(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = logits_processor(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] )  # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCAmelCase_ : List[str] = 3
lowerCAmelCase_ : Optional[Any] = self._get_uniform_logits(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = logits_processor(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
self.assertFalse(jnp.isinf(lowerCAmelCase__ ).any() )
def UpperCAmelCase_ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : Any = 20
lowerCAmelCase_ : int = 4
lowerCAmelCase_ : List[Any] = 0
lowerCAmelCase_ : Any = 5
lowerCAmelCase_ : Any = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ ,eos_token_id=lowerCAmelCase__ )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCAmelCase_ : Optional[Any] = ids_tensor((batch_size, 4) ,vocab_size=20 )
lowerCAmelCase_ : Union[str, Any] = 4
lowerCAmelCase_ : Any = self._get_uniform_logits(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Dict = logits_processor(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() ,4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCAmelCase_ : Union[str, Any] = 3
lowerCAmelCase_ : Optional[int] = self._get_uniform_logits(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = logits_processor(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
self.assertFalse(jnp.isinf(lowerCAmelCase__ ).any() )
def UpperCAmelCase_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = 4
lowerCAmelCase_ : Union[str, Any] = 10
lowerCAmelCase_ : List[Any] = 15
lowerCAmelCase_ : Tuple = 2
lowerCAmelCase_ : List[str] = 1
lowerCAmelCase_ : Dict = 15
# dummy input_ids and scores
lowerCAmelCase_ : List[Any] = ids_tensor((batch_size, sequence_length) ,lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = input_ids.copy()
lowerCAmelCase_ : str = self._get_uniform_logits(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : int = scores.copy()
# instantiate all dist processors
lowerCAmelCase_ : Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCAmelCase_ : List[Any] = FlaxTopKLogitsWarper(3 )
lowerCAmelCase_ : str = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCAmelCase_ : Any = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ ,eos_token_id=lowerCAmelCase__ )
lowerCAmelCase_ : Dict = 10
# no processor list
lowerCAmelCase_ : int = temp_dist_warp(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = top_k_warp(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = top_p_warp(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = min_dist_proc(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = bos_dist_proc(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
lowerCAmelCase_ : Dict = eos_dist_proc(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
# with processor list
lowerCAmelCase_ : Dict = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCAmelCase_ : List[str] = processor(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
# scores should be equal
self.assertTrue(jnp.allclose(lowerCAmelCase__ ,lowerCAmelCase__ ,atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Any = 4
lowerCAmelCase_ : Dict = 10
lowerCAmelCase_ : int = 15
lowerCAmelCase_ : Union[str, Any] = 2
lowerCAmelCase_ : Dict = 1
lowerCAmelCase_ : List[Any] = 15
# dummy input_ids and scores
lowerCAmelCase_ : str = ids_tensor((batch_size, sequence_length) ,lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = input_ids.copy()
lowerCAmelCase_ : Optional[int] = self._get_uniform_logits(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = scores.copy()
# instantiate all dist processors
lowerCAmelCase_ : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCAmelCase_ : List[Any] = FlaxTopKLogitsWarper(3 )
lowerCAmelCase_ : List[Any] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCAmelCase_ : str = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__ )
lowerCAmelCase_ : Dict = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ ,eos_token_id=lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = 10
# no processor list
def run_no_processor_list(lowerCAmelCase__ : Optional[Any] ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : Union[str, Any] ):
lowerCAmelCase_ : Union[str, Any] = temp_dist_warp(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
lowerCAmelCase_ : Dict = top_k_warp(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
lowerCAmelCase_ : Any = top_p_warp(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = min_dist_proc(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = bos_dist_proc(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = eos_dist_proc(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
return scores
# with processor list
def run_processor_list(lowerCAmelCase__ : str ,lowerCAmelCase__ : int ,lowerCAmelCase__ : Union[str, Any] ):
lowerCAmelCase_ : List[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCAmelCase_ : Any = processor(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
return scores
lowerCAmelCase_ : Any = jax.jit(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = jax.jit(lowerCAmelCase__ )
lowerCAmelCase_ : str = jitted_run_no_processor_list(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = jitted_run_processor_list(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
# scores should be equal
self.assertTrue(jnp.allclose(lowerCAmelCase__ ,lowerCAmelCase__ ,atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
| 709
|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
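# Added note (derived from the code above, no external behavior assumed): the
# except-branch relies on the parser's ValueError message ending in a Python
# list literal of the unrecognized flags; it evals that list and maps every
# deprecated '--no_xxx' flag to a hint about the new '--no-xxx' spelling
# before re-raising, while genuinely unknown flags are reported verbatim.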
| 683
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1_024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__(self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<mask>" , lowerCamelCase__ = None , **lowerCamelCase__ , ):
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it
A__ = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
A__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase__ , )
A__ = vocab_file
A__ = monolingual_vocab_file
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase__ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
A__ = {}
A__ = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(lowerCamelCase__ ) not in self.fairseq_tokens_to_ids:
A__ = cnt
cnt += 1
with open(lowerCamelCase__ , """r""" , encoding="""utf-8""" ) as f:
for line in f.readlines():
A__ = line.strip().split()[0]
A__ = len(self.fairseq_tokens_to_ids )
if str(lowerCamelCase__ ) not in self.fairseq_tokens_to_ids:
A__ = len(self.fairseq_tokens_to_ids )
A__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__(self, d):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, """sp_model_kwargs"""):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
def A (self , lowerCamelCase__ , lowerCamelCase__ = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A__ = [self.cls_token_id]
A__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A (self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase__ )) + [1]
return [1] + ([0] * len(lowerCamelCase__ )) + [1, 1] + ([0] * len(lowerCamelCase__ )) + [1]
def A (self , lowerCamelCase__ , lowerCamelCase__ = None ):
"""simple docstring"""
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    @property
    def vocab_size (self ):
        """simple docstring"""
        return len(self.fairseq_ids_to_tokens )
    def get_vocab (self ):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize (self , text ):
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id (self , token ):
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id
    def _convert_id_to_token (self , index ):
        """simple docstring"""
        return self.fairseq_ids_to_tokens[index]
    def convert_tokens_to_string (self , tokens ):
        """simple docstring"""
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
    def save_vocabulary (self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        out_monolingual_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"] , )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
            out_monolingual_vocab_file ) and os.path.isfile(self.monolingual_vocab_file ):
            copyfile(self.monolingual_vocab_file , out_monolingual_vocab_file )
        elif not os.path.isfile(self.monolingual_vocab_file ):
            with open(out_monolingual_vocab_file , "w" , encoding="utf-8" ) as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(F"""{str(token )} \n""" )
        return out_vocab_file, out_monolingual_vocab_file
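# A minimal usage sketch (the file names here are illustrative placeholders for
# a downloaded SentencePiece model and monolingual dict):
#
#   tokenizer = BartphoTokenizer("sentencepiece.bpe.model", "dict.txt")
#   ids = tokenizer.convert_tokens_to_ids(tokenizer._tokenize("xin chào"))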
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig ( PretrainedConfig ):
    model_type = "xlm-roberta-xl"
    def __init__(self , vocab_size=2_5_0_8_8_0 , hidden_size=2_5_6_0 , num_hidden_layers=3_6 , num_attention_heads=3_2 , intermediate_size=1_0_2_4_0 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_4 , type_vocab_size=1 , initializer_range=0.0_2 , layer_norm_eps=1E-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig ( OnnxConfig ):
    @property
    def inputs (self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
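# A minimal sketch: the constructor defaults above mirror the
# facebook/xlm-roberta-xl checkpoint, so `XLMRobertaXLConfig()` yields a
# 36-layer, 2560-hidden configuration out of the box.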
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get ( k ):
    """simple docstring"""
    return getitem, k
def _set ( k , v ):
    """simple docstring"""
    return setitem, k, v
def _del ( k ):
    """simple docstring"""
    return delitem, k
def _run_operation ( obj , fun , *args ):
    """simple docstring"""
    try:
        return fun(obj , *args ), None
    except Exception as e:
        return None, e
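# Each fixture below is a sequence of (function, key[, value]) operations that
# is replayed against both the custom HashMap and a plain dict so their
# observable behaviour can be compared step by step.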
_add_items = (
    _set('key_a', 'val_a'),
    _set('key_b', 'val_b'),
)
_overwrite_items = [
    _set('key_a', 'val_a'),
    _set('key_a', 'val_b'),
]
_delete_items = [
    _set('key_a', 'val_a'),
    _set('key_b', 'val_b'),
    _del('key_a'),
    _del('key_b'),
    _set('key_a', 'val_a'),
    _del('key_a'),
]
_access_absent_items = [
    _get('key_a'),
    _del('key_a'),
    _set('key_a', 'val_a'),
    _del('key_a'),
    _del('key_a'),
    _get('key_a'),
]
_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]
_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items""" ),
pytest.param(_overwrite_items , id="""overwrite items""" ),
pytest.param(_delete_items , id="""delete items""" ),
pytest.param(_access_absent_items , id="""access absent items""" ),
pytest.param(_add_with_resize_up , id="""add with resize up""" ),
pytest.param(_add_with_resize_down , id="""add with resize down""" ),
) , )
def test_hash_map_is_the_same_as_dict ( operations ):
    """simple docstring"""
    my = HashMap(initial_block_size=4 )
    py = {}
    for _, (fun, *args) in enumerate(operations ):
        my_res, my_exc = _run_operation(my , fun , *args )
        py_res, py_exc = _run_operation(py , fun , *args )
        assert my_res == py_res
        assert str(my ) == str(py )
        assert set(my ) == set(py )
        assert len(my ) == len(py )
        assert set(my.items() ) == set(py.items() )
def test_no_new_methods_was_added_to_api ( ):
    """simple docstring"""
    def is_public (name : str ) -> bool:
        return not name.startswith("_" )
    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}
    assert dict_public_names > hash_public_names
import argparse
import os
import re
PATH_TO_TRANSFORMERS = 'src/diffusers'
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r'\[([^\]]+)\]')
def get_indent ( line ):
    """simple docstring"""
    search = _re_indent.search(line )
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks ( code , indent_level="" , start_prompt=None , end_prompt=None ):
    """simple docstring"""
    index = 0
    lines = code.split("\n" )
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt ):
            index += 1
        blocks = ["\n".join(lines[:index] )]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines ) and (end_prompt is None or not lines[index].startswith(end_prompt )):
        if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
            if len(current_block ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
                current_block.append(lines[index] )
                blocks.append("\n".join(current_block ) )
                if index < len(lines ) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block ) )
                current_block = [lines[index]]
        else:
            current_block.append(lines[index] )
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block ) > 0:
        blocks.append("\n".join(current_block ) )
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines ):
        blocks.append("\n".join(lines[index:] ) )
    return blocks
def ignore_underscore ( key ):
    """simple docstring"""
    def _inner (x ):
        return key(x ).lower().replace("_" , "" )
    return _inner
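# e.g. sorted(["_b", "A", "a_"], key=ignore_underscore(str)) compares the keys
# "b", "a" and "a", i.e. underscores and case are ignored during ordering.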
def sort_objects ( objects , key=None ):
    """simple docstring"""
    def noop (x ):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]
    key1 = ignore_underscore(key )
    return sorted(constants , key=key1 ) + sorted(classes , key=key1 ) + sorted(functions , key=key1 )
def sort_objects_in_import ( import_statement ):
    """simple docstring"""
    def _replace (match ):
        imports = match.groups()[0]
        if "," not in imports:
            return F"""[{imports}]"""
        keys = [part.strip().replace("\"" , "" ) for part in imports.split("," )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(keys )] ) + "]"
    lines = import_statement.split("\n" )
    if len(lines ) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        sorted_indices = sort_objects(keys_to_sort , key=lambda x : x[1] )
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(lines ) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1] ) is not None:
            lines[1] = _re_bracket_content.sub(_replace , lines[1] )
        else:
            keys = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1] ) + ", ".join([F"""\"{k}\"""" for k in sort_objects(keys )] )
        return "\n".join(lines )
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace , import_statement )
        return import_statement
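# e.g. a one-line statement such as '"models": ["zeta", "alpha"],' is rewritten
# in place with its bracketed names sorted: '"models": ["alpha", "zeta"],'.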
def sort_imports ( file , check_only=True ):
    """simple docstring"""
    with open(file , "r" ) as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(main_blocks ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n" )
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines )
            else:
                line_idx += 1
        if line_idx >= len(block_lines ):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1] )
        indent = get_indent(block_lines[1] )
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code , indent_level=indent )
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys ) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort , key=lambda x : x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks ) ):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i] )
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reordered_blocks.append(sorted_block )
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
    if code != "\n".join(main_blocks ):
        if check_only:
            return True
        else:
            print(F"""Overwriting {file}.""" )
            with open(file , "w" ) as f:
                f.write("\n".join(main_blocks ) )
def sort_imports_in_all_inits ( check_only=True ):
    """simple docstring"""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root , "__init__.py" ) , check_only=check_only )
            if result:
                failures = [os.path.join(root , "__init__.py" )]
    if len(failures ) > 0:
        raise ValueError(F"""Would overwrite {len(failures )} files, run `make style`.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
Text data.
Second line of data."""
FILE_PATH = """file"""
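# The fixtures below write FILE_CONTENT to disk (zstd-compressed, and on a mock
# fsspec filesystem) so the cached_path tests can resolve real local paths.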
@pytest.fixture(scope='session' )
def zstd_path ( tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp('data' ) / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT ,'utf-8' )
    with zstd.open(path ,'wb' ) as f:
        f.write(data )
    return path
@pytest.fixture
def tmpfs_file ( tmpfs ):
    '''simple docstring'''
    with open(os.path.join(tmpfs.local_root_dir ,FILE_PATH ) ,'w' ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize('compression_format' ,['gzip', 'xz', 'zstd'] )
def test_cached_path_extract ( compression_format ,gz_file ,xz_file ,zstd_path ,tmp_path ,text_file ):
    '''simple docstring'''
    input_paths = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / """cache"""
    download_config = DownloadConfig(cache_dir=cache_dir ,extract_compressed_file=True )
    extracted_path = cached_path(input_path ,download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('default_extracted' ,[True, False] )
@pytest.mark.parametrize('default_cache_dir' ,[True, False] )
def test_extracted_datasets_path ( default_extracted ,default_cache_dir ,xz_file ,tmp_path ,monkeypatch ):
    '''simple docstring'''
    custom_cache_dir = """custom_cache"""
    custom_extracted_dir = """custom_extracted_dir"""
    custom_extracted_path = tmp_path / """custom_extracted_path"""
    if default_extracted:
        expected = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
    else:
        monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR' ,custom_extracted_dir )
        monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' ,str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir ,extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename ,download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def test_cached_path_local ( text_file ):
    '''simple docstring'''
    # absolute path
    text_file = str(Path(text_file ).resolve() )
    assert cached_path(text_file ) == text_file
    # relative path
    text_file = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file ) == text_file
def test_cached_path_missing_local ( tmp_path ):
    '''simple docstring'''
    # absolute path
    missing_file = str(tmp_path.resolve() / '__missing_file__.txt' )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = """./__missing_file__.txt"""
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def test_get_from_cache_fsspec ( tmpfs_file ):
    '''simple docstring'''
    output_path = get_from_cache(f'''tmp://{tmpfs_file}''' )
    with open(output_path ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch('datasets.config.HF_DATASETS_OFFLINE' ,True )
def test_cached_path_offline ( ):
    '''simple docstring'''
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' ,True )
def test_http_offline ( tmp_path_factory ):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp('data' ) / """file.html"""
    with pytest.raises(OfflineModeIsEnabled ):
        http_get('https://huggingface.co' ,temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' ,True )
def test_ftp_offline ( tmp_path_factory ):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp('data' ) / """file.html"""
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get('ftp://huggingface.co' ,temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head('ftp://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' ,True )
def test_fsspec_offline ( tmp_path_factory ):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp('data' ) / """file.html"""
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get('s3://huggingface.co' ,temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head('s3://huggingface.co' )
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory ( ):
    '''simple docstring'''
    raise RuntimeError("""CUDA out of memory.""" )
class ModelForTest ( nn.Module ):
    def __init__( self):
        '''simple docstring'''
        super().__init__()
        self.linear1 = nn.Linear(3 , 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4 , 5)
    def forward( self , x):
        '''simple docstring'''
        return self.linear2(self.batchnorm(self.linear1(x)))
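# `find_executable_batch_size` retries its wrapped function with a halved batch
# size every time a CUDA OOM error is raised, so the mocks below fake OOMs
# until the batch size decays from 128 down to 8.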
class MemoryTest ( unittest.TestCase ):
    def test_memory_implicit( self):
        '''simple docstring'''
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=1_28)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
        mock_training_loop_function()
        self.assertListEqual(batch_sizes , [1_28, 64, 32, 16, 8])
    def test_memory_explicit( self):
        '''simple docstring'''
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=1_28)
        def mock_training_loop_function(batch_size , arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1
        bs , arg1 = mock_training_loop_function("""hello""")
        self.assertListEqual(batch_sizes , [1_28, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1] , [8, """hello"""])
    def test_start_zero( self):
        '''simple docstring'''
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass
        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0])
    def test_approach_zero( self):
        '''simple docstring'''
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass
        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0])
    def test_verbose_guard( self):
        '''simple docstring'''
        @find_executable_batch_size(starting_batch_size=1_28)
        def mock_training_loop_function(batch_size , arg1 , arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()
        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(1_28 , """hello""" , """world""")
        self.assertIn("""Batch size was passed into `f`""" , cm.exception.args[0])
        self.assertIn("""`f(arg1='hello', arg2='world')""" , cm.exception.args[0])
    def test_any_other_error( self):
        '''simple docstring'''
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("""Oops, we had an error!""")
        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("""Oops, we had an error!""" , cm.exception.args[0])
    @require_cuda
    def test_release_memory( self):
        '''simple docstring'''
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated() , starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated() , starting_memory)
'''simple docstring'''
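# Project Euler problem 94: sum the perimeters (up to max_perimeter) of all
# "almost equilateral" triangles -- sides (a, a, a +/- 1) -- with integral area.
# The loop below walks successive solutions of the underlying Pell equation.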
def solution ( max_perimeter : int = 10**9 ):
    """simple docstring"""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(F"""{solution() = }""")
'''simple docstring'''
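# Breadth-first search over the residual graph: returns True when an augmenting
# path from `s` to `t` still exists, and records that path in `parent`.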
def bfs ( graph , s , t , parent ):
    """simple docstring"""
    visited = [False] * len(graph )
    queue = []
    queue.append(s )
    visited[s] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[t]
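# Edmonds-Karp flavour of Ford-Fulkerson: repeatedly find a shortest augmenting
# path via BFS, push its bottleneck capacity, and update the residual edges.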
def ford_fulkerson ( graph , source , sink ):
    """simple docstring"""
    parent = [-1] * (len(graph ))
    max_flow = 0
    while bfs(graph , source , sink , parent ):
        path_flow = float("""Inf""" )
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow , graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((2_56, 2_56)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)
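# `trans` maps a PIL image to a (3, 256, 256) tensor scaled to [-1, 1], the
# value range the diffusion UNet expects.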
def preprocess ( image ):
    '''simple docstring'''
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    image = [trans(img.convert('RGB' ) ) for img in image]
    image = torch.stack(image )
    return image
class DDIMNoiseComparativeAnalysisPipeline ( DiffusionPipeline ):
    def __init__( self , unet , scheduler ) -> None:
        """simple docstring"""
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=unet , scheduler=scheduler )
    def check_inputs( self , strength ) -> None:
        """simple docstring"""
        if strength < 0 or strength > 1:
            raise ValueError(F'''The value of strength should in [0.0, 1.0] but is {strength}''' )
    def get_timesteps( self , num_inference_steps , strength , device ):
        """simple docstring"""
        init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
        t_start = max(num_inference_steps - init_timestep , 0 )
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents( self , image , timestep , batch_size , dtype , device , generator=None ):
        """simple docstring"""
        if not isinstance(image , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                F'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image )}''' )
        init_latents = image.to(device=device , dtype=dtype )
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                F'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        shape = init_latents.shape
        noise = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        # get latents
        print('add noise to latents at timestep' , timestep )
        init_latents = self.scheduler.add_noise(init_latents , noise , timestep )
        latents = init_latents
        return latents
    @torch.no_grad()
    def __call__( self , image : Union[torch.FloatTensor, PIL.Image.Image] = None , strength : float = 0.8 , batch_size : int = 1 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , eta : float = 0.0 , num_inference_steps : int = 50 , use_clipped_model_output : Optional[bool] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
        """simple docstring"""
        self.check_inputs(strength )
        # 2. Preprocess image
        image = preprocess(image )
        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps , num_inference_steps = self.get_timesteps(num_inference_steps , strength , self.device )
        latent_timestep = timesteps[:1].repeat(batch_size )
        # 4. Prepare latent variables
        latents = self.prepare_latents(image , latent_timestep , batch_size , self.unet.dtype , self.device , generator )
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator , ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image )
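# A minimal usage sketch (assuming a compatible `unet` and `scheduler` are
# already loaded; the variable names are illustrative):
#
#   pipe = DDIMNoiseComparativeAnalysisPipeline(unet=unet, scheduler=scheduler)
#   images, timestep = pipe(image=init_image, strength=0.8, return_dict=False)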
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)
class MobileViTFeatureExtractor ( MobileViTImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        """simple docstring"""
        warnings.warn(
            'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use MobileViTImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester :
    '''simple docstring'''
    def __init__( self ,parent ,batch_size=1_3 ,image_size=3_2 ,num_channels=3 ,num_stages=4 ,hidden_sizes=[1_0, 2_0, 3_0, 4_0] ,depths=[2, 2, 3, 2] ,is_training=True ,use_labels=True ,intermediate_size=3_7 ,hidden_act="gelu" ,num_labels=1_0 ,initializer_range=0.02 ,out_features=["stage2", "stage3", "stage4"] ,out_indices=[2, 3, 4] ,scope=None ,):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] ,self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return ConvNextConfig(
            num_channels=self.num_channels ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_stages=self.num_stages ,hidden_act=self.hidden_act ,is_decoder=False ,initializer_range=self.initializer_range ,out_features=self.out_features ,out_indices=self.out_indices ,num_labels=self.num_labels ,)
    def create_and_check_model( self ,config ,pixel_values ,labels ):
        model = ConvNextModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) ,)
    def create_and_check_for_image_classification( self ,config ,pixel_values ,labels ):
        model = ConvNextForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values ,labels=labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def create_and_check_backbone( self ,config ,pixel_values ,labels ):
        model = ConvNextBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
        self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) ,1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) ,1 )
        self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = ConvNextModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=ConvNextConfig ,has_text_modality=False ,hidden_size=3_7 )
    def test_config( self ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        return
    @unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
        pass
    @unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
    def test_model_common_attributes( self ):
        pass
    @unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
    def test_feed_forward_chunking( self ):
        pass
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] ,expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_backbone( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict ,config ,model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict ,model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) ,expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict ,config ,model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict ,config ,model_class )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img ( ):
    """simple docstring"""
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest ( unittest.TestCase ):
    '''simple docstring'''
    @cached_property
    def default_image_processor( self ):
        return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head( self ):
        model = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image ,return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape ,expected_shape )
        expected_slice = torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,expected_slice ,atol=1E-4 ) )
@require_torch
class ConvNextBackboneTest ( BackboneTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False
    def setUp( self ):
        self.model_tester = ConvNextModelTester(self )
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader ( ABC ):
    '''simple docstring'''
    def __init__( self ,path_or_paths : Optional[NestedDataStructureLike[PathLike]] = None ,split : Optional[NamedSplit] = None ,features : Optional[Features] = None ,cache_dir : str = None ,keep_in_memory : bool = False ,streaming : bool = False ,num_proc : Optional[int] = None ,**kwargs ,):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths ,dict ) else """train"""
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs
    @abstractmethod
    def read( self ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass
class AbstractDatasetInputStream ( ABC ):
    '''simple docstring'''
    def __init__( self ,features : Optional[Features] = None ,cache_dir : str = None ,keep_in_memory : bool = False ,streaming : bool = False ,num_proc : Optional[int] = None ,**kwargs ,):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs
    @abstractmethod
    def read( self ) -> Union[Dataset, IterableDataset]:
        pass
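# Project Euler problem 145: count the "reversible" numbers below 10**max_power,
# i.e. n (with no leading zeros) such that every digit of n + reverse(n) is odd.
# Digits are filled in from the outside in, constrained by parity.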
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers ( remaining_length , remainder , digits , length ) -> int:
    """simple docstring"""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1 , -1 , -1 ):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10 ):
            digits[length // 2] = digit
            result += reversible_numbers(
                0 , (remainder + 2 * digit) // 10 , digits , length )
        return result
    result = 0
    for digit1 in range(10 ):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2 , (remainder + digit1 + digit2) // 10 , digits , length , )
    return result
def solution ( max_power = 9 ) -> int:
    """simple docstring"""
    result = 0
    for length in range(1 , max_power + 1 ):
        result += reversible_numbers(length , 0 , [0] * length , length )
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)
class MCTCTFeatureExtractor ( SequenceFeatureExtractor ):
    model_input_names = ['''input_features''', '''attention_mask''']
    def __init__( self , feature_size=80 , sampling_rate=1_6000 , padding_value=0.0 , hop_length=10 , win_length=25 , win_function="hamming_window" , frame_signal_scale=3_27_68.0 , preemphasis_coeff=0.97 , mel_floor=1.0 , normalize_means=True , normalize_vars=True , return_attention_mask=False , **kwargs , ):
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size )
        self.n_freqs = (self.n_fft // 2) + 1
    def _extract_mfsc_features( self , one_waveform ):
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size , name=self.win_function , periodic=False )
        else:
            window = window_function(window_length=self.sample_size , name=self.win_function )
        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale , window=window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=False , preemphasis=self.preemphasis_coeff , mel_filters=fbanks , mel_floor=self.mel_floor , log_mel='log' , )
        return msfc_features.T
    def _normalize_one( self , x , input_length , padding_value ):
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0 )
            x = np.subtract(x , mean )
        if self.normalize_vars:
            std = x[:input_length].std(axis=0 )
            x = np.divide(x , std )
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32 )
        return x
    def normalize( self , input_features , attention_mask = None ):
        lengths = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x , n , self.padding_value ) for x, n in zip(input_features , lengths )]
    def __call__( self , raw_speech , padding = False , max_length = None , truncation = False , pad_to_multiple_of = None , return_attention_mask = None , return_tensors = None , sampling_rate = None , **kwargs , ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    F''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
                    F''' {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float32 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform ) for one_waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({'input_features': features} )
        padded_inputs = self.pad(
            encoded_inputs , padding=padding , max_length=max_length , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , **kwargs , )
        # make sure list is in array format
        input_features = padded_inputs.get('input_features' )
        if isinstance(input_features[0] , list ):
            padded_inputs['input_features'] = [np.asarray(feature , dtype=np.float32 ) for feature in input_features]
        attention_mask = padded_inputs.get('attention_mask' )
        if attention_mask is not None:
            padded_inputs['attention_mask'] = [np.asarray(array , dtype=np.int32 ) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask , dtype=np.int32 )
                if self._get_padding_strategies(padding , max_length=max_length ) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs['input_features'] = self.normalize(
                padded_inputs['input_features'] , attention_mask=attention_mask )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
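# A minimal usage sketch (assuming a 16 kHz mono waveform as a 1-D float numpy
# array; the variable names are illustrative):
#
#   extractor = MCTCTFeatureExtractor()
#   features = extractor(waveform, sampling_rate=16000, return_tensors="np")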
def average_absolute_deviation ( nums ):
    '''simple docstring'''
    if not nums:  # Makes sure that the list is not empty
        raise ValueError('List is empty' )
    average = sum(nums ) / len(nums )  # Calculate the average
    return sum(abs(x - average ) for x in nums ) / len(nums )
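# e.g. average_absolute_deviation([1, 2, 3, 4]) == 1.0 (average 2.5; deviations
# 1.5 + 0.5 + 0.5 + 1.5 = 4, divided by 4 elements).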
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available ( ):
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS" , "{}" )
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options )
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False
    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS" , "{}" )
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options )
        if not mpi_options.get("sagemaker_mpi_enabled" , False ):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments ( TrainingArguments ):
    '''simple docstring'''
    mp_parameters : str = field(
        default="""""" , metadata={"""help""": """Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"""} , )
    def __post_init__( self ):
        '''simple docstring'''
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead." , FutureWarning , )
    @cached_property
    def _setup_devices( self ) -> "torch.device":
        '''simple docstring'''
        logger.info("PyTorch: setting up devices" )
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" )
        if self.no_cuda:
            device = torch.device("cpu" )
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda" , local_rank )
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401
            torch.distributed.init_process_group(backend="smddp" , timeout=self.ddp_timeout_delta )
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK" ) )
            device = torch.device("cuda" , self.local_rank )
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl" , timeout=self.ddp_timeout_delta )
            device = torch.device("cuda" , self.local_rank )
            self._n_gpu = 1
        if device.type == "cuda":
            torch.cuda.set_device(device )
        return device
    @property
    def world_size( self ):
        '''simple docstring'''
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size
    @property
    def place_model_on_device( self ):
        '''simple docstring'''
        return not is_sagemaker_model_parallel_available()
    @property
    def _no_sync_in_gradient_accumulation( self ):
        '''simple docstring'''
        return False
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    '''vocab_file''': '''vocab.json''',
    '''merges_file''': '''merges.txt''',
    '''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
    '''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
    '''tokenizer_config_file''': {
        '''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''facebook/blenderbot-3B''': 128}
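# GPT-2 style byte-level BPE: every raw byte is first mapped to a printable
# unicode character (see bytes_to_unicode below), so the merge table never has
# to handle whitespace or control bytes directly.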
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode ( ):
    bs = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs ( word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class BlenderbotTokenizer ( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        '''simple docstring'''
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding="utf-8" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding="utf-8" ) as merges_handle:
            bpe_merges = merges_handle.read().split("\n" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
return len(self.encoder )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> int:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
__lowercase = tuple(_lowerCamelCase )
__lowercase = get_pairs(_lowerCamelCase )
if not pairs:
return token
while True:
__lowercase = min(_lowerCamelCase , key=lambda _lowerCamelCase : self.bpe_ranks.get(_lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__lowercase , __lowercase = bigram
__lowercase = []
__lowercase = 0
while i < len(_lowerCamelCase ):
try:
__lowercase = word.index(_lowerCamelCase , _lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowercase = j
if word[i] == first and i < len(_lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowercase = tuple(_lowerCamelCase )
__lowercase = new_word
if len(_lowerCamelCase ) == 1:
break
else:
__lowercase = get_pairs(_lowerCamelCase )
__lowercase = " ".join(_lowerCamelCase )
__lowercase = word
return word
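    # Worked trace of the merge loop above (hypothetical merge ranks, not from
    # the source): if bpe_ranks ranks ("l", "l") first and then ("ll", "o"),
    # the token "hello" evolves  h e l l o  ->  h e ll o  ->  h e llo,
    # stopping once no remaining bigram appears in bpe_ranks; the space-joined
    # result is cached, so repeated tokens cost a single dict lookup.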
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = []
for token in re.findall(self.pat , _lowerCamelCase ):
__lowercase = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowerCamelCase ).split(" " ) )
return bpe_tokens
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> Tuple:
'''simple docstring'''
return self.encoder.get(_lowerCamelCase , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> str:
'''simple docstring'''
return self.decoder.get(_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> List[str]:
'''simple docstring'''
__lowercase = "".join(_lowerCamelCase )
__lowercase = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__lowercase = os.path.join(
_lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__lowercase = os.path.join(
_lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowerCamelCase , ensure_ascii=_lowerCamelCase ) + "\n" )
__lowercase = 0
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
__lowercase = token_index
writer.write(" ".join(_lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=_lowerCamelCase , token_ids_1=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCamelCase )) + [1]
return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1]
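    # Shape of the mask above (illustrative): with three tokens in the first
    # sequence and two in the second, the result is [1, 0, 0, 0, 1, 1, 0, 0, 1],
    # where 1 marks the added <s>, </s></s>, and trailing </s> positions.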
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase = None ) -> List[int]:
'''simple docstring'''
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase=False , **_lowerCamelCase ) -> int:
'''simple docstring'''
__lowercase = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_lowerCamelCase ) > 0 and not text[0].isspace()):
__lowercase = " " + text
return (text, kwargs)
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase = None ) -> Tuple:
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> List[int]:
'''simple docstring'''
__lowercase = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to add a space prefix, as is done within Blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(_lowerCamelCase )
__lowercase = " ".join(_lowerCamelCase )
__lowercase = self.encode(_lowerCamelCase )
if len(_lowerCamelCase ) > self.model_max_length:
__lowercase = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
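# Hedged usage sketch (the class name is obfuscated above; in transformers the
# slow tokenizer registered under facebook/blenderbot-3B is BlenderbotTokenizer):
# from transformers import BlenderbotTokenizer
# tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
# ids = tokenizer("Hello world").input_ids
# assert ids[-1] == tokenizer.eos_token_id  # build_inputs_with_special_tokens appends only </s>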
| 118
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
lowercase_ = None
lowercase_ = logging.get_logger(__name__)
lowercase_ = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase_ = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json''',
},
}
lowercase_ = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
lowercase_ = '''▁'''
# Segments (not really needed)
lowercase_ = 0
lowercase_ = 1
lowercase_ = 2
lowercase_ = 3
lowercase_ = 4
class A__ ( __SCREAMING_SNAKE_CASE ):
lowerCamelCase__ : int =VOCAB_FILES_NAMES
lowerCamelCase__ : Tuple =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : Optional[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : Tuple ="left"
lowerCamelCase__ : List[str] =XLNetTokenizer
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="<unk>" , lowerCamelCase="<sep>" , lowerCamelCase="<pad>" , lowerCamelCase="<cls>" , lowerCamelCase="<mask>" , lowerCamelCase=["<eop>", "<eod>"] , **lowerCamelCase , ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ : Dict = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
super().__init__(
vocab_file=lowerCamelCase , tokenizer_file=lowerCamelCase , do_lower_case=lowerCamelCase , remove_space=lowerCamelCase , keep_accents=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , pad_token=lowerCamelCase , cls_token=lowerCamelCase , mask_token=lowerCamelCase , additional_special_tokens=lowerCamelCase , **lowerCamelCase , )
__magic_name__ : Union[str, Any] = 3
__magic_name__ : List[Any] = do_lower_case
__magic_name__ : Union[str, Any] = remove_space
__magic_name__ : Tuple = keep_accents
__magic_name__ : Tuple = vocab_file
__magic_name__ : Tuple = False if not self.vocab_file else True
def lowercase ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
__magic_name__ : Union[str, Any] = [self.sep_token_id]
__magic_name__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def lowercase ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
__magic_name__ : int = [self.sep_token_id]
__magic_name__ : Tuple = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def lowercase ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(lowerCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
__magic_name__ : Union[str, Any] = os.path.join(
lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ):
copyfile(self.vocab_file , lowerCamelCase )
return (out_vocab_file,)
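# Hedged illustration of the XLNet conventions above (not in the source): unlike
# BERT, <sep> and <cls> are appended at the END of the sequence and <cls> gets
# segment id 2 (cls_segment_id above), e.g. for a single sequence A:
#   tokens:      A_1 ... A_n <sep> <cls>
#   token_type:   0  ...  0    0     2
# padding_side is "left", so batched generation keeps the sequence tails aligned.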
| 336
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class A__ ( unittest.TestCase ):
def lowercase ( self ) -> Dict:
"""simple docstring"""
__magic_name__ : List[Any] = {
'''task_specific_params''': {
'''summarization''': {'''length_penalty''': 1.0, '''max_length''': 128, '''min_length''': 12, '''num_beams''': 4},
'''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 142, '''min_length''': 56, '''num_beams''': 4},
'''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 62, '''min_length''': 11, '''num_beams''': 6},
}
}
__magic_name__ : int = {
'''task_specific_params.summarization.length_penalty''': 1.0,
'''task_specific_params.summarization.max_length''': 128,
'''task_specific_params.summarization.min_length''': 12,
'''task_specific_params.summarization.num_beams''': 4,
'''task_specific_params.summarization_cnn.length_penalty''': 2.0,
'''task_specific_params.summarization_cnn.max_length''': 142,
'''task_specific_params.summarization_cnn.min_length''': 56,
'''task_specific_params.summarization_cnn.num_beams''': 4,
'''task_specific_params.summarization_xsum.length_penalty''': 1.0,
'''task_specific_params.summarization_xsum.max_length''': 62,
'''task_specific_params.summarization_xsum.min_length''': 11,
'''task_specific_params.summarization_xsum.num_beams''': 6,
}
self.assertEqual(flatten_dict(lowerCamelCase ) , lowerCamelCase )
def lowercase ( self ) -> Tuple:
"""simple docstring"""
__magic_name__ : Optional[Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(lowerCamelCase ) , x.transpose() ) )
__magic_name__ : Union[str, Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(lowerCamelCase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def lowercase ( self ) -> Optional[int]:
"""simple docstring"""
__magic_name__ : Union[str, Any] = np.random.randn(3 , 4 )
__magic_name__ : List[str] = torch.tensor(lowerCamelCase )
self.assertTrue(np.allclose(transpose(lowerCamelCase ) , transpose(lowerCamelCase ).numpy() ) )
__magic_name__ : int = np.random.randn(3 , 4 , 5 )
__magic_name__ : Union[str, Any] = torch.tensor(lowerCamelCase )
self.assertTrue(np.allclose(transpose(lowerCamelCase , axes=(1, 2, 0) ) , transpose(lowerCamelCase , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def lowercase ( self ) -> Tuple:
"""simple docstring"""
__magic_name__ : Dict = np.random.randn(3 , 4 )
__magic_name__ : Any = tf.constant(lowerCamelCase )
self.assertTrue(np.allclose(transpose(lowerCamelCase ) , transpose(lowerCamelCase ).numpy() ) )
__magic_name__ : str = np.random.randn(3 , 4 , 5 )
__magic_name__ : Optional[int] = tf.constant(lowerCamelCase )
self.assertTrue(np.allclose(transpose(lowerCamelCase , axes=(1, 2, 0) ) , transpose(lowerCamelCase , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def lowercase ( self ) -> int:
"""simple docstring"""
__magic_name__ : Union[str, Any] = np.random.randn(3 , 4 )
__magic_name__ : Optional[Any] = jnp.array(lowerCamelCase )
self.assertTrue(np.allclose(transpose(lowerCamelCase ) , np.asarray(transpose(lowerCamelCase ) ) ) )
__magic_name__ : int = np.random.randn(3 , 4 , 5 )
__magic_name__ : Tuple = jnp.array(lowerCamelCase )
self.assertTrue(np.allclose(transpose(lowerCamelCase , axes=(1, 2, 0) ) , np.asarray(transpose(lowerCamelCase , axes=(1, 2, 0) ) ) ) )
def lowercase ( self ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ : Dict = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(lowerCamelCase , (4, 3) ) , np.reshape(lowerCamelCase , (4, 3) ) ) )
__magic_name__ : Optional[int] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(lowerCamelCase , (12, 5) ) , np.reshape(lowerCamelCase , (12, 5) ) ) )
@require_torch
def lowercase ( self ) -> int:
"""simple docstring"""
__magic_name__ : Tuple = np.random.randn(3 , 4 )
__magic_name__ : List[Any] = torch.tensor(lowerCamelCase )
self.assertTrue(np.allclose(reshape(lowerCamelCase , (4, 3) ) , reshape(lowerCamelCase , (4, 3) ).numpy() ) )
__magic_name__ : List[str] = np.random.randn(3 , 4 , 5 )
__magic_name__ : Tuple = torch.tensor(lowerCamelCase )
self.assertTrue(np.allclose(reshape(lowerCamelCase , (12, 5) ) , reshape(lowerCamelCase , (12, 5) ).numpy() ) )
@require_tf
def lowercase ( self ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ : Union[str, Any] = np.random.randn(3 , 4 )
__magic_name__ : List[str] = tf.constant(lowerCamelCase )
self.assertTrue(np.allclose(reshape(lowerCamelCase , (4, 3) ) , reshape(lowerCamelCase , (4, 3) ).numpy() ) )
__magic_name__ : Optional[Any] = np.random.randn(3 , 4 , 5 )
__magic_name__ : str = tf.constant(lowerCamelCase )
self.assertTrue(np.allclose(reshape(lowerCamelCase , (12, 5) ) , reshape(lowerCamelCase , (12, 5) ).numpy() ) )
@require_flax
def lowercase ( self ) -> Tuple:
"""simple docstring"""
__magic_name__ : Dict = np.random.randn(3 , 4 )
__magic_name__ : Optional[Any] = jnp.array(lowerCamelCase )
self.assertTrue(np.allclose(reshape(lowerCamelCase , (4, 3) ) , np.asarray(reshape(lowerCamelCase , (4, 3) ) ) ) )
__magic_name__ : Union[str, Any] = np.random.randn(3 , 4 , 5 )
__magic_name__ : List[Any] = jnp.array(lowerCamelCase )
self.assertTrue(np.allclose(reshape(lowerCamelCase , (12, 5) ) , np.asarray(reshape(lowerCamelCase , (12, 5) ) ) ) )
def lowercase ( self ) -> Dict:
"""simple docstring"""
__magic_name__ : Optional[Any] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(lowerCamelCase ) , np.squeeze(lowerCamelCase ) ) )
__magic_name__ : int = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(lowerCamelCase , axis=2 ) , np.squeeze(lowerCamelCase , axis=2 ) ) )
@require_torch
def lowercase ( self ) -> List[Any]:
"""simple docstring"""
__magic_name__ : Any = np.random.randn(1 , 3 , 4 )
__magic_name__ : List[str] = torch.tensor(lowerCamelCase )
self.assertTrue(np.allclose(squeeze(lowerCamelCase ) , squeeze(lowerCamelCase ).numpy() ) )
__magic_name__ : Union[str, Any] = np.random.randn(1 , 4 , 1 , 5 )
__magic_name__ : Tuple = torch.tensor(lowerCamelCase )
self.assertTrue(np.allclose(squeeze(lowerCamelCase , axis=2 ) , squeeze(lowerCamelCase , axis=2 ).numpy() ) )
@require_tf
def lowercase ( self ) -> str:
"""simple docstring"""
__magic_name__ : Optional[int] = np.random.randn(1 , 3 , 4 )
__magic_name__ : Any = tf.constant(lowerCamelCase )
self.assertTrue(np.allclose(squeeze(lowerCamelCase ) , squeeze(lowerCamelCase ).numpy() ) )
__magic_name__ : int = np.random.randn(1 , 4 , 1 , 5 )
__magic_name__ : str = tf.constant(lowerCamelCase )
self.assertTrue(np.allclose(squeeze(lowerCamelCase , axis=2 ) , squeeze(lowerCamelCase , axis=2 ).numpy() ) )
@require_flax
def lowercase ( self ) -> List[Any]:
"""simple docstring"""
__magic_name__ : str = np.random.randn(1 , 3 , 4 )
__magic_name__ : List[str] = jnp.array(lowerCamelCase )
self.assertTrue(np.allclose(squeeze(lowerCamelCase ) , np.asarray(squeeze(lowerCamelCase ) ) ) )
__magic_name__ : Optional[int] = np.random.randn(1 , 4 , 1 , 5 )
__magic_name__ : Optional[int] = jnp.array(lowerCamelCase )
self.assertTrue(np.allclose(squeeze(lowerCamelCase , axis=2 ) , np.asarray(squeeze(lowerCamelCase , axis=2 ) ) ) )
def lowercase ( self ) -> Optional[int]:
"""simple docstring"""
__magic_name__ : Tuple = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(lowerCamelCase , axis=1 ) , np.expand_dims(lowerCamelCase , axis=1 ) ) )
@require_torch
def lowercase ( self ) -> List[Any]:
"""simple docstring"""
__magic_name__ : Union[str, Any] = np.random.randn(3 , 4 )
__magic_name__ : str = torch.tensor(lowerCamelCase )
self.assertTrue(np.allclose(expand_dims(lowerCamelCase , axis=1 ) , expand_dims(lowerCamelCase , axis=1 ).numpy() ) )
@require_tf
def lowercase ( self ) -> Any:
"""simple docstring"""
__magic_name__ : List[str] = np.random.randn(3 , 4 )
__magic_name__ : Union[str, Any] = tf.constant(lowerCamelCase )
self.assertTrue(np.allclose(expand_dims(lowerCamelCase , axis=1 ) , expand_dims(lowerCamelCase , axis=1 ).numpy() ) )
@require_flax
def lowercase ( self ) -> Optional[Any]:
"""simple docstring"""
__magic_name__ : List[Any] = np.random.randn(3 , 4 )
__magic_name__ : int = jnp.array(lowerCamelCase )
self.assertTrue(np.allclose(expand_dims(lowerCamelCase , axis=1 ) , np.asarray(expand_dims(lowerCamelCase , axis=1 ) ) ) )
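# A minimal dispatcher of the kind these tests exercise (a sketch under stated
# assumptions, not the transformers implementation): each framework-agnostic op
# inspects the input type and forwards to the matching backend, which is why
# every test compares the generic call against the backend-native result.
def transpose_any(x, axes=None):
    if isinstance(x, np.ndarray):
        return np.transpose(x, axes=axes)
    if is_torch_available() and isinstance(x, torch.Tensor):
        dims = axes if axes is not None else tuple(reversed(range(x.dim())))
        return x.permute(*dims)
    raise TypeError(f"unsupported input type: {type(x)}")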
| 336
| 1
|
import string
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> None:
for key in range(len(string.ascii_uppercase ) ):
_lowercase = ''
for symbol in message:
if symbol in string.ascii_uppercase:
_lowercase = string.ascii_uppercase.find(snake_case__ )
_lowercase = num - key
if num < 0:
_lowercase = num + len(string.ascii_uppercase )
_lowercase = translated + string.ascii_uppercase[num]
else:
_lowercase = translated + symbol
print(F"""Decryption using Key #{key}: {translated}""" )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
_lowercase = input('Encrypted message: ' )
_lowercase = message.upper()
decrypt(snake_case__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 67
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Any ) -> str:
_lowercase = DPTConfig(embedding_type='hybrid' )
if "large" in checkpoint_url:
_lowercase = 1024
_lowercase = 4096
_lowercase = 24
_lowercase = 16
_lowercase = [5, 11, 17, 23]
_lowercase = [256, 512, 1024, 1024]
_lowercase = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
_lowercase = 768
_lowercase = [1, 1, 1, 0.5]
_lowercase = [256, 512, 768, 768]
_lowercase = 150
_lowercase = 16
_lowercase = (1, 384, 384)
_lowercase = False
_lowercase = 'project'
if "ade" in checkpoint_url:
_lowercase = True
_lowercase = 768
_lowercase = [1, 1, 1, 0.5]
_lowercase = 150
_lowercase = 16
_lowercase = 'huggingface/label-files'
_lowercase = 'ade20k-id2label.json'
_lowercase = json.load(open(cached_download(hf_hub_url(snake_case__ , snake_case__ , repo_type='dataset' ) ) , 'r' ) )
        _lowercase = {int(k ): v for k, v in idalabel.items()}
_lowercase = idalabel
_lowercase = {v: k for k, v in idalabel.items()}
_lowercase = [1, 150, 480, 480]
return config, expected_shape
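# Illustration of the branching above (hypothetical checkpoint file names): an
# "ade" checkpoint gets the 150-class ADE20K segmentation head with expected
# logits shape [1, 150, 480, 480]; other hybrid checkpoints keep the depth head
# with expected shape (1, 384, 384).
# get_dpt_config("dpt_hybrid-midas-501f0c75.pt")   # depth-estimation config
# get_dpt_config("dpt_hybrid-ade20k-53898607.pt")  # semantic-segmentation config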
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> str:
_lowercase = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[Any] ) -> Any:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
_lowercase = name.replace('pretrained.model' , 'dpt.encoder' )
if "pretrained.model" in name:
_lowercase = name.replace('pretrained.model' , 'dpt.embeddings' )
if "patch_embed" in name:
_lowercase = name.replace('patch_embed' , '' )
if "pos_embed" in name:
_lowercase = name.replace('pos_embed' , 'position_embeddings' )
if "attn.proj" in name:
_lowercase = name.replace('attn.proj' , 'attention.output.dense' )
if "proj" in name and "project" not in name:
_lowercase = name.replace('proj' , 'projection' )
if "blocks" in name:
_lowercase = name.replace('blocks' , 'layer' )
if "mlp.fc1" in name:
_lowercase = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_lowercase = name.replace('mlp.fc2' , 'output.dense' )
if "norm1" in name and "backbone" not in name:
_lowercase = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name and "backbone" not in name:
_lowercase = name.replace('norm2' , 'layernorm_after' )
if "scratch.output_conv" in name:
_lowercase = name.replace('scratch.output_conv' , 'head' )
if "scratch" in name:
_lowercase = name.replace('scratch' , 'neck' )
if "layer1_rn" in name:
_lowercase = name.replace('layer1_rn' , 'convs.0' )
if "layer2_rn" in name:
_lowercase = name.replace('layer2_rn' , 'convs.1' )
if "layer3_rn" in name:
_lowercase = name.replace('layer3_rn' , 'convs.2' )
if "layer4_rn" in name:
_lowercase = name.replace('layer4_rn' , 'convs.3' )
if "refinenet" in name:
_lowercase = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
_lowercase = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
_lowercase = name.replace('out_conv' , 'projection' )
if "resConfUnit1" in name:
_lowercase = name.replace('resConfUnit1' , 'residual_layer1' )
if "resConfUnit2" in name:
_lowercase = name.replace('resConfUnit2' , 'residual_layer2' )
if "conv1" in name:
_lowercase = name.replace('conv1' , 'convolution1' )
if "conv2" in name:
_lowercase = name.replace('conv2' , 'convolution2' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
if "pretrained.act_postprocess2.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
if "pretrained.act_postprocess3.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
if "pretrained.act_postprocess4.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_lowercase = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
if "pretrained.act_postprocess1.4" in name:
_lowercase = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
if "pretrained.act_postprocess2.3" in name:
_lowercase = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
if "pretrained.act_postprocess2.4" in name:
_lowercase = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
if "pretrained.act_postprocess3.3" in name:
_lowercase = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
if "pretrained.act_postprocess4.3" in name:
_lowercase = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
if "pretrained.act_postprocess4.4" in name:
_lowercase = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
if "pretrained" in name:
_lowercase = name.replace('pretrained' , 'dpt' )
if "bn" in name:
_lowercase = name.replace('bn' , 'batch_norm' )
if "head" in name:
_lowercase = name.replace('head' , 'head.head' )
if "encoder.norm" in name:
_lowercase = name.replace('encoder.norm' , 'layernorm' )
if "auxlayer" in name:
_lowercase = name.replace('auxlayer' , 'auxiliary_head.head' )
if "backbone" in name:
_lowercase = name.replace('backbone' , 'backbone.bit.encoder' )
if ".." in name:
_lowercase = name.replace('..' , '.' )
if "stem.conv" in name:
_lowercase = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
_lowercase = name.replace('blocks' , 'layers' )
if "convolution" in name and "backbone" in name:
_lowercase = name.replace('convolution' , 'conv' )
if "layer" in name and "backbone" in name:
_lowercase = name.replace('layer' , 'layers' )
if "backbone.bit.encoder.bit" in name:
_lowercase = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' )
if "embedder.conv" in name:
_lowercase = name.replace('embedder.conv' , 'embedder.convolution' )
if "backbone.bit.encoder.stem.norm" in name:
_lowercase = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' )
return name
def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[str] , snake_case__ :int ) -> Dict:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowercase = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
_lowercase = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowercase = in_proj_weight[: config.hidden_size, :]
_lowercase = in_proj_bias[: config.hidden_size]
_lowercase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowercase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowercase = in_proj_weight[
-config.hidden_size :, :
]
_lowercase = in_proj_bias[-config.hidden_size :]
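# Shape bookkeeping behind the split above (a sketch, not from the source): timm
# stores one fused (3*hidden, hidden) qkv matrix per layer, and the row blocks
# [0:h], [h:2h], [2h:3h] become the separate query/key/value weights that the
# HF attention module expects.
def _demo_qkv_split(hidden_size=4):
    fused = torch.randn(3 * hidden_size, hidden_size)
    q, k, v = fused[:hidden_size], fused[hidden_size : 2 * hidden_size], fused[2 * hidden_size :]
    assert torch.equal(torch.cat([q, k, v]), fused)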
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
_lowercase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowercase = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[int] , snake_case__ :List[Any] , snake_case__ :str , snake_case__ :Any , snake_case__ :List[str] ) -> str:
_lowercase , _lowercase = get_dpt_config(snake_case__ )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
_lowercase = torch.load(snake_case__ , map_location='cpu' )
# remove certain keys
remove_ignore_keys_(snake_case__ )
# rename keys
for key in state_dict.copy().keys():
_lowercase = state_dict.pop(snake_case__ )
_lowercase = val
# read in qkv matrices
read_in_q_k_v(snake_case__ , snake_case__ )
# load HuggingFace model
_lowercase = DPTForSemanticSegmentation(snake_case__ ) if 'ade' in checkpoint_url else DPTForDepthEstimation(snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
# Check outputs on an image
_lowercase = 480 if 'ade' in checkpoint_url else 384
_lowercase = DPTImageProcessor(size=snake_case__ )
_lowercase = prepare_img()
_lowercase = image_processor(snake_case__ , return_tensors='pt' )
# forward pass
_lowercase = model(**snake_case__ ).logits if 'ade' in checkpoint_url else model(**snake_case__ ).predicted_depth
if show_prediction:
_lowercase = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='bicubic' , align_corners=snake_case__ , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case__ )
if push_to_hub:
model.push_to_hub('ybelkada/dpt-hybrid-midas' )
image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
snake_case = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
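# Example invocation (the script file name is assumed):
#   python convert_dpt_hybrid_to_pytorch.py \
#       --checkpoint_url ./dpt_hybrid-midas-501f0c75.pt \
#       --pytorch_dump_folder_path ./dpt-hybrid-midas --show_prediction
# Note: despite the flag name, the value is passed straight to torch.load(), so
# a local checkpoint path is expected (the URL-download line above is commented out).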
| 67
| 1
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
__A : Any = logging.get_logger(__name__)
def __UpperCamelCase ( _A : Tuple , _A : Any ) ->List[Any]:
"""simple docstring"""
lowerCamelCase_ =set()
lowerCamelCase_ =[]
def parse_line(_A : List[Any] ):
for line in fp:
if isinstance(_A , _A ):
lowerCamelCase_ =line.decode("""UTF-8""" )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(""" """ ):
# process a single warning and move it to `selected_warnings`.
if len(_A ) > 0:
lowerCamelCase_ ="""\n""".join(_A )
# Only keep the warnings specified in `targets`
if any(f': {x}: ' in warning for x in targets ):
selected_warnings.add(_A )
buffer.clear()
continue
else:
lowerCamelCase_ =line.strip()
buffer.append(_A )
if from_gh:
for filename in os.listdir(_A ):
lowerCamelCase_ =os.path.join(_A , _A )
if not os.path.isdir(_A ):
# read the file
if filename != "warnings.txt":
continue
with open(_A ) as fp:
parse_line(_A )
else:
try:
with zipfile.ZipFile(_A ) as z:
for filename in z.namelist():
if not os.path.isdir(_A ):
# read the file
if filename != "warnings.txt":
continue
with z.open(_A ) as fp:
parse_line(_A )
except Exception:
logger.warning(
f'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' )
return selected_warnings
def __UpperCamelCase ( _A : Optional[Any] , _A : int ) ->str:
"""simple docstring"""
lowerCamelCase_ =set()
lowerCamelCase_ =[os.path.join(_A , _A ) for p in os.listdir(_A ) if (p.endswith(""".zip""" ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(_A , _A ) )
return selected_warnings
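# Illustration of the filter above (made-up log line): a warning line is kept
# only when one of the target categories appears as ": <Category>: " in it.
# line = "src/foo.py:12: DeprecationWarning: bar is deprecated"
# any(f": {x}: " in line for x in ["DeprecationWarning", "UserWarning"])  # -> True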
if __name__ == "__main__":
def __UpperCamelCase ( _A : Dict ) ->Optional[int]:
"""simple docstring"""
return values.split(""",""" )
__A : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
__A : Union[str, Any] = parser.parse_args()
__A : Union[str, Any] = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
__A : Optional[int] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
__A : Optional[int] = extract_warnings(args.output_dir, args.targets)
__A : Tuple = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 721
|
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__A : Optional[Any] = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
__A : Tuple = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
__A : str = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def __UpperCamelCase ( _A : List[Any] , _A : Union[str, Any] ) ->Dict:
"""simple docstring"""
return float((preds == labels).mean() )
def __UpperCamelCase ( _A : Union[str, Any] , _A : Union[str, Any] , _A : List[Any]="binary" ) ->List[Any]:
"""simple docstring"""
lowerCamelCase_ =simple_accuracy(_A , _A )
lowerCamelCase_ =float(fa_score(y_true=_A , y_pred=_A , average=_A ) )
return {
"accuracy": acc,
"f1": fa,
}
def __UpperCamelCase ( _A : int , _A : Union[str, Any] ) ->int:
"""simple docstring"""
lowerCamelCase_ ={}
for id_pred, label in zip(_A , _A ):
lowerCamelCase_ =f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
lowerCamelCase_ =id_pred["""prediction"""]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
lowerCamelCase_ =[(pred, label)]
lowerCamelCase_ , lowerCamelCase_ =[], []
for question, preds_labels in question_map.items():
lowerCamelCase_ , lowerCamelCase_ =zip(*_A )
lowerCamelCase_ =fa_score(y_true=_A , y_pred=_A , average="""macro""" )
fas.append(_A )
lowerCamelCase_ =int(sum(pred == label for pred, label in preds_labels ) == len(_A ) )
ems.append(_A )
lowerCamelCase_ =float(sum(_A ) / len(_A ) )
lowerCamelCase_ =sum(_A ) / len(_A )
lowerCamelCase_ =float(fa_score(y_true=_A , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _SCREAMING_SNAKE_CASE ( datasets.Metric):
def _snake_case ( self )-> Union[str, Any]:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
def _snake_case ( self )-> Optional[Any]:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )-> Optional[int]:
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}
elif self.config_name == "cb":
return acc_and_fa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , fa_avg="""macro""" )
elif self.config_name == "record":
lowerCamelCase_ =[
{
"""qas""": [
{"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
for ref in references
]
}
]
lowerCamelCase_ ={pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
return evaluate_record(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 75
| 0
|
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
__a : Union[str, Any] = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
__a : int = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
__a : Dict = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Any ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/krishnap25/mauve""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/krishnap25/mauve"""] , reference_urls=[
"""https://arxiv.org/abs/2102.01454""",
"""https://github.com/krishnap25/mauve""",
] , )
def lowerCAmelCase_ ( self : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : int=None , UpperCamelCase_ : int="auto" , UpperCamelCase_ : str=-1 , UpperCamelCase_ : str=0.9 , UpperCamelCase_ : Dict=5 , UpperCamelCase_ : str=500 , UpperCamelCase_ : int="gpt2-large" , UpperCamelCase_ : Dict=-1 , UpperCamelCase_ : Tuple=1_024 , UpperCamelCase_ : Optional[Any]=25 , UpperCamelCase_ : str=5 , UpperCamelCase_ : str=True , UpperCamelCase_ : Dict=25 , ):
"""simple docstring"""
__A = compute_mauve(
p_text=UpperCamelCase_ , q_text=UpperCamelCase_ , p_features=UpperCamelCase_ , q_features=UpperCamelCase_ , p_tokens=UpperCamelCase_ , q_tokens=UpperCamelCase_ , num_buckets=UpperCamelCase_ , pca_max_data=UpperCamelCase_ , kmeans_explained_var=UpperCamelCase_ , kmeans_num_redo=UpperCamelCase_ , kmeans_max_iter=UpperCamelCase_ , featurize_model_name=UpperCamelCase_ , device_id=UpperCamelCase_ , max_text_length=UpperCamelCase_ , divergence_curve_discretization_size=UpperCamelCase_ , mauve_scaling_factor=UpperCamelCase_ , verbose=UpperCamelCase_ , seed=UpperCamelCase_ , )
return out
| 637
|
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
__a : Union[str, Any] = logging.get_logger(__name__)
__a : Tuple = "Hello world! cécé herlolip"
def _SCREAMING_SNAKE_CASE ( __lowercase : str , __lowercase : str , __lowercase : bool ) -> List[Any]:
"""simple docstring"""
__A = FairseqRobertaModel.from_pretrained(__lowercase )
roberta.eval() # disable dropout
__A = roberta.model.encoder.sentence_encoder
__A = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
__A = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our RoBERTa config:""" , __lowercase )
__A = XLMRobertaXLForSequenceClassification(__lowercase ) if classification_head else XLMRobertaXLForMaskedLM(__lowercase )
model.eval()
# Now let's copy all the weights.
# Embeddings
__A = roberta_sent_encoder.embed_tokens.weight
__A = roberta_sent_encoder.embed_positions.weight
__A = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
__A = roberta_sent_encoder.layer_norm.weight
__A = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__A = model.roberta.encoder.layer[i]
__A = roberta_sent_encoder.layers[i]
__A = layer.attention
__A = roberta_layer.self_attn_layer_norm.weight
__A = roberta_layer.self_attn_layer_norm.bias
# self attention
__A = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
__A = roberta_layer.self_attn.q_proj.weight
__A = roberta_layer.self_attn.q_proj.bias
__A = roberta_layer.self_attn.k_proj.weight
__A = roberta_layer.self_attn.k_proj.bias
__A = roberta_layer.self_attn.v_proj.weight
__A = roberta_layer.self_attn.v_proj.bias
# self-attention output
__A = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
__A = roberta_layer.self_attn.out_proj.weight
__A = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
__A = roberta_layer.final_layer_norm.weight
__A = roberta_layer.final_layer_norm.bias
# intermediate
__A = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
__A = roberta_layer.fca.weight
__A = roberta_layer.fca.bias
# output
__A = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
__A = roberta_layer.fca.weight
__A = roberta_layer.fca.bias
# end of layer
if classification_head:
__A = roberta.model.classification_heads["""mnli"""].dense.weight
__A = roberta.model.classification_heads["""mnli"""].dense.bias
__A = roberta.model.classification_heads["""mnli"""].out_proj.weight
__A = roberta.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
__A = roberta.model.encoder.lm_head.dense.weight
__A = roberta.model.encoder.lm_head.dense.bias
__A = roberta.model.encoder.lm_head.layer_norm.weight
__A = roberta.model.encoder.lm_head.layer_norm.bias
__A = roberta.model.encoder.lm_head.weight
__A = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
__A = roberta.encode(__lowercase ).unsqueeze(0 ) # batch of size 1
__A = model(__lowercase )[0]
if classification_head:
__A = roberta.model.classification_heads["""mnli"""](roberta.extract_features(__lowercase ) )
else:
__A = roberta.model(__lowercase )[0]
print(our_output.shape , their_output.shape )
__A = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7
__A = torch.allclose(__lowercase , __lowercase , atol=1E-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
pathlib.Path(__lowercase ).mkdir(parents=__lowercase , exist_ok=__lowercase )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(__lowercase )
if __name__ == "__main__":
__a : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
__a : Dict = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 637
| 1
|
'''simple docstring'''
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
__UpperCamelCase : str = {
'''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'''
}
def lowercase ( lowerCAmelCase : str = "dhaka" , lowerCAmelCase : int = 5):
"""simple docstring"""
_A : Optional[int] = min(lowerCAmelCase , 50) # Prevent abuse!
_A : Union[str, Any] = {
'''q''': query,
'''tbm''': '''isch''',
'''hl''': '''en''',
'''ijn''': '''0''',
}
_A : str = requests.get('''https://www.google.com/search''' , params=lowerCAmelCase , headers=lowerCAmelCase)
_A : List[Any] = BeautifulSoup(html.text , '''html.parser''')
_A : Dict = ''''''.join(
re.findall(R'''AF_initDataCallback\(([^<]+)\);''' , str(soup.select('''script'''))))
_A : List[Any] = json.dumps(lowerCAmelCase)
_A : Optional[int] = json.loads(lowerCAmelCase)
_A : List[Any] = re.findall(
R'''\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",''' , lowerCAmelCase , )
if not matched_google_image_data:
return 0
_A : str = re.sub(
R'''\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]''' , '''''' , str(lowerCAmelCase) , )
_A : Optional[Any] = re.findall(
R'''(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]''' , lowerCAmelCase , )
for index, fixed_full_res_image in enumerate(lowerCAmelCase):
if index >= max_images:
return index
_A : Dict = bytes(lowerCAmelCase , '''ascii''').decode(
'''unicode-escape''')
_A : Optional[Any] = bytes(lowerCAmelCase , '''ascii''').decode(
'''unicode-escape''')
_A : Optional[int] = urllib.request.build_opener()
_A : Union[str, Any] = [
(
'''User-Agent''',
'''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''',
)
]
urllib.request.install_opener(lowerCAmelCase)
_A : Tuple = f"""query_{query.replace(' ' , '_')}"""
if not os.path.exists(lowerCAmelCase):
os.makedirs(lowerCAmelCase)
urllib.request.urlretrieve( # noqa: S310
lowerCAmelCase , f"""{path_name}/original_size_img_{index}.jpg""")
return index
if __name__ == "__main__":
try:
__UpperCamelCase : int = download_images_from_google_query(sys.argv[1])
print(f'{image_count} images were downloaded to disk.')
except IndexError:
print('''Please provide a search term.''')
raise
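# Example run (the script file name is assumed): saves up to five (the default
# max) full-resolution results under ./query_<term>/ as original_size_img_<i>.jpg.
#   python download_images_from_google_query.py "dhaka"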
| 721
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
__UpperCamelCase : str = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
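

# Usage sketch (added): a small sanity check against the defaults above. The
# factorized embedding keeps token embeddings at 128 dims while hidden states
# are 4096 dims, which is ALBERT's main parameter saving over BERT.
def _demo_albert_config() -> None:
    config = AlbertConfig()
    assert config.embedding_size == 128
    assert config.hidden_size == 4096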
| 417
| 0
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
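

# Usage sketch (added): pairing a caption with raw audio in one call. The
# checkpoint name and the 48 kHz sampling rate are illustrative assumptions,
# not part of this file; `from_pretrained` wires up the two components
# declared in the class attributes above.
def _demo_clap_processor() -> None:
    import numpy as np

    processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
    audio = np.zeros(48_000, dtype=np.float32)  # one second of silence
    batch = processor(text=["a dog barking"], audios=audio, sampling_rate=48_000, return_tensors="pt")
    print(sorted(batch.keys()))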
| 432
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_lowerCAmelCase = {
"configuration_layoutlmv3": [
"LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
"LayoutLMv3Config",
"LayoutLMv3OnnxConfig",
],
"processing_layoutlmv3": ["LayoutLMv3Processor"],
"tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMv3ForQuestionAnswering",
"LayoutLMv3ForSequenceClassification",
"LayoutLMv3ForTokenClassification",
"LayoutLMv3Model",
"LayoutLMv3PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLayoutLMv3ForQuestionAnswering",
"TFLayoutLMv3ForSequenceClassification",
"TFLayoutLMv3ForTokenClassification",
"TFLayoutLMv3Model",
"TFLayoutLMv3PreTrainedModel",
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["LayoutLMv3FeatureExtractor"]
_lowerCAmelCase = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 432
| 1
|
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 708
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase_ : str = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ : Union[str, Any] = ["""LayoutXLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ : Optional[int] = ["""LayoutXLMTokenizerFast"""]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 497
| 0
|
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract time info from a single job in a GitHub Actions workflow run."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f"""{k}: {v["duration"]}""")
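

# Usage sketch (added): extract_time_from_single_job only reads the two
# timestamp fields, so it can be exercised offline with a synthetic job dict.
# The timestamps below are arbitrary example values.
def _demo_job_duration() -> None:
    job = {"started_at": "2023-05-01T12:00:00Z", "completed_at": "2023-05-01T12:42:30Z"}
    info = extract_time_from_single_job(job)
    print(info["duration"])  # 42 (minutes, rounded)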
| 582
|
'''simple docstring'''
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()

    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
| 582
| 1
|
"""simple docstring"""
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by the magnitude of its first term --> creates a 'unit' matrix.
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
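

# Worked sketch (added): for the 2x2 system 2x + y = 5, x - y = 1 the unique
# solution is x = 2, y = 1, which solve_simultaneous returns rounded to 5 places.
def _demo_two_by_two() -> None:
    print(solve_simultaneous([[2, 1, 5], [1, -1, 1]]))  # expected [2.0, 1.0]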
| 295
|
"""simple docstring"""
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
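

# Non-interactive sketch (added): the same routine on a hard-coded 3-vertex
# graph, avoiding the input() prompts above. INF marks a missing edge.
def _demo_floyd_warshall() -> None:
    inf = float("inf")
    graph = [
        [0.0, 2.0, inf],
        [inf, 0.0, 3.0],
        [1.0, inf, 0.0],
    ]
    dist, _ = floyd_warshall(graph, 3)
    assert dist[0][2] == 5.0  # best path 0 -> 1 -> 2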
| 295
| 1
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/electra-small-generator''': 512,
'''google/electra-base-generator''': 512,
'''google/electra-large-generator''': 512,
'''google/electra-small-discriminator''': 512,
'''google/electra-base-discriminator''': 512,
'''google/electra-large-discriminator''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
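

# Usage sketch (added): for a sentence pair, segment ids are 0 for the first
# segment (including [CLS] and its [SEP]) and 1 for the second. The id lists
# below are arbitrary placeholders, since the method only needs their lengths.
def _demo_token_type_ids(tokenizer: ElectraTokenizerFast) -> None:
    ids_a, ids_b = [5, 6], [7]
    print(tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b))
    # expected [0, 0, 0, 0, 1, 1] -> [CLS] a a [SEP] b [SEP]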
| 677
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
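

# Minimal sketch (added) of a concrete subcommand built on the interface above.
# The "ping" command name is invented for illustration; in practice the CLI
# passes its subparsers object into register_subcommand.
class PingCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        ping_parser = parser.add_parser("ping", help="Print a liveness message.")
        ping_parser.set_defaults(func=lambda args: PingCommand())

    def run(self):
        print("pong")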
| 677
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )
        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict=True):
        # 1. Input: fold the frame axis out of the batch dimension
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
        hidden_states = self.proj_in(hidden_states)
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )
        # 3. Output: unfold back to (batch * frames, C, H, W) and add the residual
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output)
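

# Shape sketch (added): with num_frames=4, an (8, C, H, W) batch is treated as
# 2 clips of 4 frames; attention runs over the frame axis and the output keeps
# the input shape. The small channel/head sizes are arbitrary test values.
def _demo_temporal_transformer() -> None:
    model = TransformerTemporalModel(num_attention_heads=2, attention_head_dim=8, in_channels=16, norm_num_groups=4)
    frames = torch.randn(8, 16, 4, 4)  # 2 clips x 4 frames each
    out = model(frames, num_frames=4).sample
    assert out.shape == frames.shape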
| 705
|
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # Make sure the handler kwargs are passed through to the GradScaler.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 220
| 0
|
"""simple docstring"""
def power(base: int, exponent: int) -> float:
    """Recursively compute base ** exponent for non-negative exponents."""
    return base * power(base, (exponent - 1)) if exponent else 1


if __name__ == "__main__":
    print("""Raise base to the power of exponent using recursion...""")
    base = int(input("""Enter the base: """).strip())
    exponent = int(input("""Enter the exponent: """).strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"""{base} to the power of {exponent} is {result}""")
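

# Worked sketch (added): the recursion itself only handles non-negative
# exponents, which is why the block above inverts the result when exponent < 0.
def _demo_power() -> None:
    assert power(2, 10) == 1024
    assert 1 / power(2, 2) == 0.25  # i.e. 2 ** -2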
| 409
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""")
            audio = kwargs.pop("""raw_speech""")
        else:
            audio = kwargs.pop("""audio""", None)
        sampling_rate = kwargs.pop("""sampling_rate""", None)
        text = kwargs.pop("""text""", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("""You need to specify either an `audio` or `text` input to process.""")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["""labels"""] = encodings["""input_ids"""]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
            """labels by using the argument `text` of the regular `__call__` method (either in the same call as """
            """your audio inputs, or in a separate call.""" )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
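

# Usage sketch (added): the deprecation warning above points at this pattern:
# pass transcriptions through the `text` keyword in the same call as the audio.
# The checkpoint name and the 16 kHz rate are illustrative assumptions.
def _demo_speech2text_processor() -> None:
    import numpy as np

    processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
    audio = np.zeros(16_000, dtype=np.float32)  # one second of silence
    batch = processor(audio=audio, sampling_rate=16_000, text="hello world")
    print(sorted(batch.keys()))  # input_features and labels, among others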
| 409
| 1
|
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
processor = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        """HTSAT-tiny""",
        """roberta""",
        checkpoint_path,
        precision="""fp32""",
        device="""cuda:0""" if torch.cuda.is_available() else """cpu""",
        enable_fusion=enable_fusion,
        fusion_type="""aff_2d""" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = R""".*sequential.(\d+).*"""
    text_projection_pattern = R""".*_projection.(\d+).*"""

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(F'''sequential.{sequential_layer}.''', F'''layers.{int(sequential_layer)//3}.linear.''')
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(F'''_projection.{projecton_layer}.''', F'''_projection.linear{transformers_projection_layer}.''')

        if "audio" in key and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("""qkv""", """query""")] = query_layer
            model_state_dict[key.replace("""qkv""", """key""")] = key_layer
            model_state_dict[key.replace("""qkv""", """value""")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
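

# Worked sketch (added): the regex rewrite in rename_state_dict collapses CLAP's
# packed nn.Sequential indices into per-layer linear names. The key below is a
# hypothetical example constructed for illustration only.
def _demo_sequential_rename() -> None:
    key = "text_projection.sequential.9.weight"
    sequential_layer = re.match(r".*sequential.(\d+).*", key).group(1)
    # CLAP packs (linear, activation, dropout) triples, hence the // 3.
    print(key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer) // 3}.linear."))
    # -> text_projection.layers.3.linear.weight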
| 74
|
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("""transformers.models.speecht5""")


def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["""input_conv.weight_g"""]
    hf_model.conv_pre.weight_v.data = checkpoint["""input_conv.weight_v"""]
    hf_model.conv_pre.bias.data = checkpoint["""input_conv.bias"""]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[F'''upsamples.{i}.1.weight_g''']
        hf_model.upsampler[i].weight_v.data = checkpoint[F'''upsamples.{i}.1.weight_v''']
        hf_model.upsampler[i].bias.data = checkpoint[F'''upsamples.{i}.1.bias''']

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']

    hf_model.conv_post.weight_g.data = checkpoint["""output_conv.1.weight_g"""]
    hf_model.conv_post.weight_v.data = checkpoint["""output_conv.1.weight_v"""]
    hf_model.conv_post.bias.data = checkpoint["""output_conv.1.bias"""]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["""model"""]["""generator"""], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("""Pushing to the hub...""")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 74
| 1
|
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = '''docs/source/en/_toctree.yml'''


def clean_model_doc_toc(model_doc):
    """Cleans the model-doc table of content by removing duplicates and sorting models."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f'''{duplicate_key} is present several times in the documentation table of content at '''
                '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
                '''others.''' )
        # Only add this once
        new_doc.append({'''local''': duplicate_key, '''title''': titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding='''utf-8''') as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['''sections''']

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1

    model_doc = api_doc[model_idx]['''sections''']

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if '''sections''' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['''sections''']
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]['''sections'''] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]['''sections'''] = model_doc
            content[api_idx]['''sections'''] = api_doc
            with open(PATH_TO_TOC, '''w''', encoding='''utf-8''') as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
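

# Usage sketch (added): clean_model_doc_toc on a toy section list. Duplicate
# "local" entries collapse to one and the result comes back sorted by title.
def _demo_clean_toc() -> None:
    toy = [
        {"local": "model_doc/bert", "title": "BERT"},
        {"local": "model_doc/albert", "title": "ALBERT"},
        {"local": "model_doc/bert", "title": "BERT"},
    ]
    print(clean_model_doc_toc(toy))  # ALBERT first, BERT kept once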
| 60
|
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 6_5_5_3_6,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 6_5_5_3_6,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 1_3_1_0_7_2,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """Returns a timestep, given the scaling factors for the clean image and for the noise."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)


class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    url = MODELS_MAP[model_name]['url']
    os.system(F"""wget {url} ./""")
    return F"""./{model_name}.ckpt"""
MID_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
DOWN_NUM_TO_LAYER = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
UP_NUM_TO_LAYER = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
DEPTH_0_TO_LAYER = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
RES_CONV_MAP = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
ATTN_MAP = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def convert_resconv_naming(name):
    if name.startswith('skip'):
        return name.replace('skip', RES_CONV_MAP['skip'])

    # name has to be of format main.{digit}
    if not name.startswith('main.'):
        raise ValueError(F"""ResConvBlock error with {name}""")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(F"""Attn error with {name}""")
def rename(input_string, max_depth=13):
    string = input_string

    if string.split('.')[0] == "timestep_embed":
        return string.replace('timestep_embed', 'time_proj')

    depth = 0
    if string.startswith('net.3.'):
        depth += 1
        string = string[6:]
    elif string.startswith('net.'):
        string = string[4:]

    while string.startswith('main.7.'):
        depth += 1
        string = string[7:]

    if string.startswith('main.'):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = 'mid_block'
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = F"""down_blocks.{depth}"""
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = F"""up_blocks.{max_depth - depth - 1}"""
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = F"""up_blocks.{max_depth - 1}""" if int(layer_num) > 3 else 'down_blocks.0'

    if not string_left.startswith('.'):
        raise ValueError(F"""Naming error with {input_string} and string_left: {string_left}.""")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + '.' + new_layer + '.' + string_left
    else:
        new_string = [prefix + '.' + new_layer + '.' + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith('kernel'):
            # up- and downsample layers, don't have trainable weights
            continue

        new_k = rename(k)

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v

    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    model_name = args.model_path.split('/')[-1].split('.')[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), F"""Make sure to provide one of the official model names {MODELS_MAP.keys()}"""
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]['sample_rate']
    sample_size = MODELS_MAP[model_name]['sample_size']

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)['state_dict'])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, F"""Problem with {renamed_minus_diffusers}"""
    assert all(k.endswith('kernel') for k in list(diffusers_minus_renamed)), F"""Problem with {diffusers_minus_renamed}"""

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), F"""Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"""
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33
    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)
    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)

    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print('Diff sum', diff_sum)
    print('Diff max', diff_max)

    assert diff_max < 1e-3, F"""Diff max: {diff_max} is too much :-/"""

    print(F"""Conversion for {model_name} successful!""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
    parser.add_argument(
        '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
    )
    parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()
main(args)
| 544
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = 'data2vec-text'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
| 717
|
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception('Matrices are not 2x2')
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception('Odd matrices are not supported!')

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]

    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print('\n'.join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def _SCREAMING_SNAKE_CASE ( a , a ) -> list:
if matrix_dimensions(a )[1] != matrix_dimensions(a )[0]:
__A : Dict = (
'Unable to multiply these matrices, please check the dimensions.\n'
F"""Matrix A: {matrixa}\n"""
F"""Matrix B: {matrixa}"""
)
raise Exception(a )
__A : int = matrix_dimensions(a )
__A : Any = matrix_dimensions(a )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
__A : List[Any] = max(*a , *a )
__A : Optional[Any] = int(math.pow(2 , math.ceil(math.loga(a ) ) ) )
__A : Union[str, Any] = matrixa
__A : Optional[int] = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 , a ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , a ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , a ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
__A : str = actual_strassen(a , a )
# Removing the additional zeros
for i in range(0 , a ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , a ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
UpperCAmelCase : Union[str, Any] = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
UpperCAmelCase : Optional[Any] = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
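    # A quick correctness check: compare strassen() against a naive
    # triple-loop product. The expected value is computed first because
    # strassen() pads its inputs in place.
    def naive_multiply(m1: list, m2: list) -> list:
        return [
            [sum(m1[i][k] * m2[k][j] for k in range(len(m2))) for j in range(len(m2[0]))]
            for i in range(len(m1))
        ]

    sample_a = [[1, 2, 3], [4, 5, 6]]
    sample_b = [[7, 8], [9, 10], [11, 12]]
    expected = naive_multiply(sample_a, sample_b)
    assert strassen(sample_a, sample_b) == expected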
| 77
| 0
|
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 27
|
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
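    # A tiny agreement check across the three implementations, on one pangram
    # and one non-pangram.
    for sentence in ("The quick brown fox jumps over the lazy dog", "My cat skips"):
        assert is_pangram(sentence) == is_pangram_faster(sentence) == is_pangram_fastest(sentence)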
| 204
| 0
|
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
parser.add_argument('''--codebook_path''', default=None, type=str, help='''Path to flava codebook checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
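    # A toy check of the parameter-count bookkeeping used above:
    # count_parameters() sums every tensor except `encoder.embeddings` keys.
    toy_state = {
        "encoder.embeddings.weight": torch.ones(2, 2),
        "layer.weight": torch.full((2, 2), 2.0),
    }
    assert count_parameters(toy_state) == torch.tensor(8.0)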
| 455
|
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__lowerCamelCase = 16
__lowerCamelCase = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
'''simple docstring'''
UpperCAmelCase_ : Tuple = []
# Download the dataset
UpperCAmelCase_ : Optional[int] = load_dataset("glue" , "mrpc" )
# Create our splits
UpperCAmelCase_ : Optional[Any] = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
UpperCAmelCase_ : Tuple = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase_ : Optional[int] = config["lr"]
UpperCAmelCase_ : Dict = int(config["num_epochs"] )
UpperCAmelCase_ : Union[str, Any] = int(config["seed"] )
UpperCAmelCase_ : Optional[Any] = int(config["batch_size"] )
UpperCAmelCase_ : Optional[Any] = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
UpperCAmelCase_ : Optional[int] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
UpperCAmelCase_ : int = batch_size // MAX_GPU_BATCH_SIZE
UpperCAmelCase_ : List[str] = MAX_GPU_BATCH_SIZE
set_seed(__snake_case )
# New Code #
# Create our folds:
UpperCAmelCase_ : int = kfold.split(np.zeros(datasets["train"].num_rows ) , datasets["train"]["label"] )
UpperCAmelCase_ : Dict = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(__snake_case ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = get_fold_dataloaders(
__snake_case , __snake_case , __snake_case , __snake_case , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=__snake_case )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase_ : Union[str, Any] = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase_ : Tuple = AdamW(params=model.parameters() , lr=__snake_case )
# Instantiate scheduler
UpperCAmelCase_ : str = get_linear_schedule_with_warmup(
optimizer=__snake_case , num_warmup_steps=1_0_0 , num_training_steps=(len(__snake_case ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# Now we train the model
for epoch in range(__snake_case ):
model.train()
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase_ : Optional[Any] = model(**__snake_case )
UpperCAmelCase_ : Dict = outputs.loss
UpperCAmelCase_ : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(__snake_case )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase_ : str = model(**__snake_case )
UpperCAmelCase_ : Optional[int] = outputs.logits.argmax(dim=-1 )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=__snake_case , references=__snake_case , )
UpperCAmelCase_ : Optional[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , __snake_case )
# New Code #
# We also run predictions on the test set at the very end
UpperCAmelCase_ : Union[str, Any] = []
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase_ : Tuple = model(**__snake_case )
UpperCAmelCase_ : Optional[int] = outputs.logits
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = accelerator.gather_for_metrics((predictions, batch["labels"]) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(__snake_case , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
UpperCAmelCase_ : Union[str, Any] = torch.cat(__snake_case , dim=0 )
UpperCAmelCase_ : str = torch.stack(__snake_case , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
UpperCAmelCase_ : List[str] = metric.compute(predictions=__snake_case , references=__snake_case )
accelerator.print("Average test metrics from all folds:" , __snake_case )
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
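    # A standalone sketch of the fold bookkeeping used in training_function():
    # StratifiedKFold yields index arrays that are fed to `dataset.select(...)`.
    demo_labels = np.array([0, 1] * 8)
    for fold, (train_idx, valid_idx) in enumerate(
        StratifiedKFold(n_splits=4).split(np.zeros(len(demo_labels)), demo_labels)
    ):
        print(f"fold {fold}: {len(train_idx)} train / {len(valid_idx)} validation rows")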
| 455
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/s2t-small-librispeech-asr''': (
        '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'''
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}


class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                '''Configuration for convolutional module is incorrect. '''
                '''It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '''
                f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, '''
                f'''`config.num_conv_layers = {self.num_conv_layers}`.'''
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
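# A small usage sketch (public import path; not executed as part of the module):
#
#   from transformers import Speech2TextConfig
#
#   config = Speech2TextConfig()          # d_model=256, two conv layers by default
#   Speech2TextConfig(num_conv_layers=3)  # raises ValueError: only two kernel
#                                         # sizes are given for three layers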
| 83
|
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = '''src/transformers'''

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    '''transformers''',
    os.path.join(PATH_TO_TRANSFORMERS, '''__init__.py'''),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    '''CLIPConfigMixin''',
    '''DecisionTransformerConfigMixin''',
    '''EncoderDecoderConfigMixin''',
    '''RagConfigMixin''',
    '''SpeechEncoderDecoderConfigMixin''',
    '''VisionEncoderDecoderConfigMixin''',
    '''VisionTextDualEncoderConfigMixin''',
}


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f'''https://huggingface.co/{ckpt_name}'''
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = '''\n'''.join(sorted(configs_without_checkpoint))
        raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''')
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
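    # A tiny regex check: `_re_checkpoint` extracts (name, link) pairs from
    # markdown references like the one below.
    demo = "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
    assert _re_checkpoint.findall(demo) == [
        ("bert-base-uncased", "https://huggingface.co/bert-base-uncased")
    ]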
| 83
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """google/canine-s""": """https://huggingface.co/google/canine-s/resolve/main/config.json""",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
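# A quick usage sketch (public import path): CANINE is tokenizer-free, so the
# default bos/eos ids are private-use-area codepoints (0xE000 / 0xE001 above).
#
#   from transformers import CanineConfig
#
#   config = CanineConfig()
#   print(hex(config.bos_token_id), hex(config.eos_token_id), config.num_hash_buckets)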
| 246
|
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f'''The estimated value of pi is {pi_estimate}''')
    print(f'''The numpy value of pi is {pi}''')
    print(f'''The total error is {abs(pi - pi_estimate)}''')


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print('******************')
    print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''')
    print(f'''Estimated value is {estimated_value}''')
    print(f'''Expected value is {expected_value}''')
    print(f'''Total error is {abs(estimated_value - expected_value)}''')
    print('******************')


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print('******************')
    print('Estimating pi using area_under_curve_estimator')
    print(f'''Estimated value is {estimated_value}''')
    print(f'''Expected value is {pi}''')
    print(f'''Total error is {abs(estimated_value - pi)}''')
    print('******************')
if __name__ == "__main__":
import doctest
doctest.testmod()
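    # The estimators above rely on E[f(U)] * (b - a) approximating the integral
    # of f over [a, b] for U ~ Uniform(a, b); a quick smoke run:
    pi_estimator(10_000)
    area_under_line_estimator_check(10_000)
    pi_estimator_using_area_under_curve(10_000)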
| 246
| 1
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class lowerCamelCase :
lowerCamelCase__ : Optional[str] = field(
default='cifar10' ,metadata={'help': 'Name of a dataset from the datasets package'} )
lowerCamelCase__ : Optional[str] = field(
default=A__ ,metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
lowerCamelCase__ : Optional[str] = field(
default=A__ ,metadata={'help': 'The column name of the images in the files.'} )
lowerCamelCase__ : Optional[str] = field(default=A__ ,metadata={'help': 'A folder containing the training data.'} )
lowerCamelCase__ : Optional[str] = field(default=A__ ,metadata={'help': 'A folder containing the validation data.'} )
lowerCamelCase__ : Optional[float] = field(
default=0.1_5 ,metadata={'help': 'Percent to split off of train for validation.'} )
lowerCamelCase__ : Optional[int] = field(
default=A__ ,metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} ,)
lowerCamelCase__ : Optional[int] = field(
default=A__ ,metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} ,)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = {}
if self.train_dir is not None:
SCREAMING_SNAKE_CASE__ = self.train_dir
if self.validation_dir is not None:
SCREAMING_SNAKE_CASE__ = self.validation_dir
SCREAMING_SNAKE_CASE__ = data_files if data_files else None
@dataclass
class lowerCamelCase :
lowerCamelCase__ : str = field(
default=A__ ,metadata={
'help': (
'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
)
} ,)
lowerCamelCase__ : Optional[str] = field(
default=A__ ,metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'} )
lowerCamelCase__ : Optional[str] = field(
default=A__ ,metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} ,)
lowerCamelCase__ : Optional[str] = field(
default=A__ ,metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
lowerCamelCase__ : str = field(
default='main' ,metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} ,)
lowerCamelCase__ : str = field(default=A__ ,metadata={'help': 'Name or path of preprocessor config.'} )
lowerCamelCase__ : bool = field(
default=A__ ,metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} ,)
lowerCamelCase__ : float = field(
default=0.7_5 ,metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'} )
lowerCamelCase__ : bool = field(
default=A__ ,metadata={'help': 'Whether or not to train with normalized pixel values as target.'} )
@dataclass
class lowerCamelCase (A__ ):
lowerCamelCase__ : float = field(
default=1E-3 ,metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'} )
def A ( snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = torch.stack([example["""pixel_values"""] for example in examples] )
return {"pixel_values": pixel_values}
def A ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mae""" , a__ , a__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = training_args.get_process_log_level()
logger.setLevel(a__ )
transformers.utils.logging.set_verbosity(a__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
SCREAMING_SNAKE_CASE__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
SCREAMING_SNAKE_CASE__ = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , a__ ) and data_args.train_val_split > 0.0:
SCREAMING_SNAKE_CASE__ = ds["""train"""].train_test_split(data_args.train_val_split )
SCREAMING_SNAKE_CASE__ = split["""train"""]
SCREAMING_SNAKE_CASE__ = split["""test"""]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE__ = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
SCREAMING_SNAKE_CASE__ = ViTMAEConfig.from_pretrained(model_args.config_name , **a__ )
elif model_args.model_name_or_path:
SCREAMING_SNAKE_CASE__ = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **a__ )
else:
SCREAMING_SNAKE_CASE__ = ViTMAEConfig()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
SCREAMING_SNAKE_CASE__ = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **a__ )
elif model_args.model_name_or_path:
SCREAMING_SNAKE_CASE__ = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **a__ )
else:
SCREAMING_SNAKE_CASE__ = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
SCREAMING_SNAKE_CASE__ = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=a__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
SCREAMING_SNAKE_CASE__ = ViTMAEForPreTraining(a__ )
if training_args.do_train:
SCREAMING_SNAKE_CASE__ = ds["""train"""].column_names
else:
SCREAMING_SNAKE_CASE__ = ds["""validation"""].column_names
if data_args.image_column_name is not None:
SCREAMING_SNAKE_CASE__ = data_args.image_column_name
elif "image" in column_names:
SCREAMING_SNAKE_CASE__ = """image"""
elif "img" in column_names:
SCREAMING_SNAKE_CASE__ = """img"""
else:
SCREAMING_SNAKE_CASE__ = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
SCREAMING_SNAKE_CASE__ = image_processor.size["""shortest_edge"""]
else:
SCREAMING_SNAKE_CASE__ = (image_processor.size["""height"""], image_processor.size["""width"""])
SCREAMING_SNAKE_CASE__ = Compose(
[
            Lambda(lambda img: img.convert("""RGB""") if img.mode != "RGB" else img),
RandomResizedCrop(a__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(snake_case__ ):
SCREAMING_SNAKE_CASE__ = [transforms(a__ ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE__ = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(a__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
SCREAMING_SNAKE_CASE__ = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(a__ )
# Compute absolute learning rate
SCREAMING_SNAKE_CASE__ = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
SCREAMING_SNAKE_CASE__ = training_args.base_learning_rate * total_train_batch_size / 2_56
# Initialize our trainer
SCREAMING_SNAKE_CASE__ = Trainer(
model=a__ , args=a__ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=a__ , data_collator=a__ , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE__ = None
if training_args.resume_from_checkpoint is not None:
SCREAMING_SNAKE_CASE__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
SCREAMING_SNAKE_CASE__ = last_checkpoint
SCREAMING_SNAKE_CASE__ = trainer.train(resume_from_checkpoint=a__ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
SCREAMING_SNAKE_CASE__ = trainer.evaluate()
trainer.log_metrics("""eval""" , a__ )
trainer.save_metrics("""eval""" , a__ )
# Write model card and (optionally) push to hub
SCREAMING_SNAKE_CASE__ = {
"""tasks""": """masked-auto-encoding""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-auto-encoding"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**a__ )
else:
trainer.create_model_card(**a__ )
def A ( snake_case__ ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
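# A standalone sketch of the MAE augmentation pipeline built above. The
# mean/std values here are illustrative; the script itself takes them from the
# image processor. For the base learning rate scaling, a base_lr of 1e-3 with a
# total batch size of 4096 gives absolute_lr = 1e-3 * 4096 / 256 = 1.6e-2.
#
#   from PIL import Image
#
#   demo_transforms = Compose(
#       [
#           RandomResizedCrop(224, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
#           RandomHorizontalFlip(),
#           ToTensor(),
#           Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
#       ]
#   )
#   print(demo_transforms(Image.new("RGB", (256, 256))).shape)  # torch.Size([3, 224, 224])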
| 196
|
import math
import qiskit
def quantum_full_adder(
    input_1: int = 1, input_2: int = 1, carry_in: int = 1
) -> qiskit.result.counts.Counts:
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError('inputs must be integers.')

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('inputs must be positive.')

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError('inputs must be exact integers.')

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('inputs must be less or equal to 2.')

    # build registers
    qr = qiskit.QuantumRegister(4, 'qr')
    cr = qiskit.ClassicalRegister(2, 'cr')
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend('aer_simulator')
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(f'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
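    # Cross-check against the classical full adder: for definite inputs
    # (no Hadamard / value-2 entries) the measured state should be the two-bit
    # binary of input_1 + input_2 + carry_in, read as (carry, sum).
    for bits in [(0, 0, 0), (1, 0, 1), (1, 1, 1)]:
        expected = format(sum(bits), '02b')
        print(bits, '->', quantum_full_adder(*bits), 'expected', expected)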
| 618
| 0
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
A : str = ["input_features", "is_longer"]
def __init__( self : Union[str, Any] , _lowerCAmelCase : int=64 , _lowerCAmelCase : int=4_80_00 , _lowerCAmelCase : int=4_80 , _lowerCAmelCase : List[Any]=10 , _lowerCAmelCase : Dict=10_24 , _lowerCAmelCase : Tuple=0.0 , _lowerCAmelCase : Dict=False , _lowerCAmelCase : float = 0 , _lowerCAmelCase : float = 1_40_00 , _lowerCAmelCase : int = None , _lowerCAmelCase : str = "fusion" , _lowerCAmelCase : str = "repeatpad" , **_lowerCAmelCase : str , ):
super().__init__(
feature_size=_lowerCAmelCase , sampling_rate=_lowerCAmelCase , padding_value=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , **_lowerCAmelCase , )
__snake_case : Optional[Any] = top_db
__snake_case : str = truncation
__snake_case : Optional[int] = padding
__snake_case : List[str] = fft_window_size
__snake_case : str = (fft_window_size >> 1) + 1
__snake_case : Dict = hop_length
__snake_case : List[str] = max_length_s
__snake_case : List[Any] = max_length_s * sampling_rate
__snake_case : List[str] = sampling_rate
__snake_case : Optional[Any] = frequency_min
__snake_case : List[str] = frequency_max
__snake_case : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_lowerCAmelCase , min_frequency=_lowerCAmelCase , max_frequency=_lowerCAmelCase , sampling_rate=_lowerCAmelCase , norm=_lowerCAmelCase , mel_scale="""htk""" , )
__snake_case : Any = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_lowerCAmelCase , min_frequency=_lowerCAmelCase , max_frequency=_lowerCAmelCase , sampling_rate=_lowerCAmelCase , norm="""slaney""" , mel_scale="""slaney""" , )
def snake_case__ ( self : Optional[int] ):
__snake_case : int = copy.deepcopy(self.__dict__ )
__snake_case : Any = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def snake_case__ ( self : List[str] , _lowerCAmelCase : np.array , _lowerCAmelCase : Optional[np.array] = None ):
__snake_case : int = spectrogram(
_lowerCAmelCase , window_function(self.fft_window_size , """hann""" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_lowerCAmelCase , log_mel="""dB""" , )
return log_mel_spectrogram.T
def snake_case__ ( self : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ):
__snake_case : Optional[int] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
__snake_case : str = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
__snake_case : int = [0]
# randomly choose index for each part
__snake_case : Tuple = np.random.choice(ranges[0] )
__snake_case : Tuple = np.random.choice(ranges[1] )
__snake_case : Tuple = np.random.choice(ranges[2] )
__snake_case : List[str] = mel[idx_front : idx_front + chunk_frames, :]
__snake_case : Dict = mel[idx_middle : idx_middle + chunk_frames, :]
__snake_case : int = mel[idx_back : idx_back + chunk_frames, :]
__snake_case : int = torch.tensor(mel[None, None, :] )
__snake_case : Any = torch.nn.functional.interpolate(
_lowerCAmelCase , size=[chunk_frames, 64] , mode="""bilinear""" , align_corners=_lowerCAmelCase )
__snake_case : str = mel_shrink[0][0].numpy()
__snake_case : List[str] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def snake_case__ ( self : List[Any] , _lowerCAmelCase : np.array , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ):
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
__snake_case : str = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
__snake_case : Optional[Any] = len(_lowerCAmelCase ) - max_length
__snake_case : Tuple = np.random.randint(0 , overflow + 1 )
__snake_case : int = waveform[idx : idx + max_length]
__snake_case : Optional[Any] = self._np_extract_fbank_features(_lowerCAmelCase , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
__snake_case : Optional[int] = self._np_extract_fbank_features(_lowerCAmelCase , self.mel_filters )
__snake_case : Tuple = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
__snake_case : int = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
__snake_case : Union[str, Any] = np.stack([mel, mel, mel, mel] , axis=0 )
__snake_case : Optional[int] = False
else:
__snake_case : List[str] = self._random_mel_fusion(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__snake_case : Dict = True
else:
            raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
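
# Illustrative usage sketch, added for clarity; it is not part of the original
# module. The checkpoint name and the 48 kHz / 10 s defaults are assumptions
# based on the public CLAP checkpoints, so treat this as a sketch rather than
# the canonical API:
#
#   from transformers import ClapFeatureExtractor
#   import numpy as np
#
#   feature_extractor = ClapFeatureExtractor.from_pretrained("laion/clap-htsat-unfused")
#   waveform = np.random.randn(480_000)  # ~10 s of mono audio at 48 kHz
#   inputs = feature_extractor(waveform, sampling_rate=48_000, return_tensors="np")
#   print(inputs["input_features"].shape, inputs["is_longer"])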
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the digits of ``number``."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Return the sum of all numbers that can be written as the sum of the
    fifth powers of their digits (Project Euler problem 30)."""
    return sum(
        number
        for number in range(1_000, 1_000_000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
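
# Added note on the search bound: a number with 7 digits is at least 1_000_000,
# but the largest possible digit fifth-power sum for 7 digits is
# 7 * 9**5 = 413_343, so no number with 7 or more digits can equal its own
# digit fifth-power sum. Searching below 1_000_000 is therefore exhaustive.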
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_clap': [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapAudioConfig',
'ClapConfig',
'ClapTextConfig',
],
'processing_clap': ['ClapProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapModel',
'ClapPreTrainedModel',
'ClapTextModel',
'ClapTextModelWithProjection',
'ClapAudioModel',
'ClapAudioModelWithProjection',
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
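
# Added note, not part of the original file: with the lazy structure above,
# `import transformers.models.clap` stays cheap. Heavy torch-backed symbols are
# only materialised on first attribute access, e.g.
#
#   from transformers.models.clap import ClapConfig  # light, config only
#   from transformers.models.clap import ClapModel   # pulls in modeling_clap (needs torch)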
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
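
# Added illustration, not from the original test file: a minimal standalone
# sketch of the batched stepping that test_batch_step_no_noise exercises. The
# tensor shapes here are assumptions for illustration only.
#
#   import torch
#   from diffusers import DDIMParallelScheduler
#
#   scheduler = DDIMParallelScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(10)
#   sample = torch.randn(2, 3, 8, 8)     # two latents stepped in parallel
#   residual = torch.randn(2, 3, 8, 8)   # model outputs for both latents
#   timesteps = scheduler.timesteps[:2]  # one (possibly different) t per latent
#   prev = scheduler.batch_step_no_noise(residual, timesteps, sample, 0.0)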
"""simple docstring"""
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
A : List[Any] = TypeVar("T")
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
return (position - 1) // 2
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
return (2 * position) + 1
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
return (2 * position) + 2
class _UpperCamelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self ):
__lowerCAmelCase = []
__lowerCAmelCase = {}
__lowerCAmelCase = 0
def __len__( self ):
return self.elements
def __repr__( self ):
return str(self.heap )
def snake_case ( self ):
return self.elements == 0
def snake_case ( self , __a , __a ):
self.heap.append((elem, weight) )
__lowerCAmelCase = self.elements
self.elements += 1
self._bubble_up(UpperCAmelCase__ )
def snake_case ( self ):
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
__lowerCAmelCase = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
__lowerCAmelCase = self.heap[0]
self._bubble_down(UpperCAmelCase__ )
return elem
def snake_case ( self , __a , __a ):
__lowerCAmelCase = self.position_map[elem]
__lowerCAmelCase = (elem, weight)
if position > 0:
__lowerCAmelCase = get_parent_position(UpperCAmelCase__ )
__lowerCAmelCase = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(UpperCAmelCase__ )
else:
self._bubble_down(UpperCAmelCase__ )
else:
self._bubble_down(UpperCAmelCase__ )
def snake_case ( self , __a ):
__lowerCAmelCase = self.position_map[elem]
if curr_pos == 0:
return None
__lowerCAmelCase = get_parent_position(UpperCAmelCase__ )
__lowerCAmelCase = self.heap[curr_pos]
__lowerCAmelCase = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(UpperCAmelCase__ , UpperCAmelCase__ )
return self._bubble_up(UpperCAmelCase__ )
return None
def snake_case ( self , __a ):
__lowerCAmelCase = self.position_map[elem]
__lowerCAmelCase = self.heap[curr_pos]
__lowerCAmelCase = get_child_left_position(UpperCAmelCase__ )
__lowerCAmelCase = get_child_right_position(UpperCAmelCase__ )
if child_left_position < self.elements and child_right_position < self.elements:
__lowerCAmelCase = self.heap[child_left_position]
__lowerCAmelCase = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(UpperCAmelCase__ , UpperCAmelCase__ )
return self._bubble_down(UpperCAmelCase__ )
if child_left_position < self.elements:
__lowerCAmelCase = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(UpperCAmelCase__ , UpperCAmelCase__ )
return self._bubble_down(UpperCAmelCase__ )
else:
return None
if child_right_position < self.elements:
__lowerCAmelCase = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(UpperCAmelCase__ , UpperCAmelCase__ )
return self._bubble_down(UpperCAmelCase__ )
return None
def snake_case ( self , __a , __a ):
__lowerCAmelCase = self.heap[nodea_pos][0]
__lowerCAmelCase = self.heap[nodea_pos][0]
__lowerCAmelCase = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
__lowerCAmelCase = nodea_pos
__lowerCAmelCase = nodea_pos
class _UpperCamelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self ):
__lowerCAmelCase = {}
__lowerCAmelCase = 0
def __repr__( self ):
return str(self.connections )
def __len__( self ):
return self.nodes
def snake_case ( self , __a ):
# Add a node in the graph if it is not in the graph
if node not in self.connections:
__lowerCAmelCase = {}
self.nodes += 1
def snake_case ( self , __a , __a , __a ):
self.add_node(UpperCAmelCase__ )
self.add_node(UpperCAmelCase__ )
__lowerCAmelCase = weight
__lowerCAmelCase = weight
def _lowerCamelCase ( _UpperCamelCase , ):
'''simple docstring'''
__lowerCAmelCase = {node: maxsize for node in graph.connections}
__lowerCAmelCase = {node: None for node in graph.connections}
__lowerCAmelCase = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(_UpperCamelCase , _UpperCamelCase )
if priority_queue.is_empty():
return dist, parent
# initialization
__lowerCAmelCase = priority_queue.extract_min()
__lowerCAmelCase = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__lowerCAmelCase = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(_UpperCamelCase , dist[neighbour] )
__lowerCAmelCase = node
# running prim's algorithm
while not priority_queue.is_empty():
__lowerCAmelCase = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__lowerCAmelCase = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(_UpperCamelCase , dist[neighbour] )
__lowerCAmelCase = node
return dist, parent
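
if __name__ == "__main__":
    # Added usage sketch (not in the original module), using the identifiers
    # restored above: build a small weighted graph and run the algorithm.
    graph = GraphUndirectedWeighted[int]()
    graph.add_edge(1, 2, 3)
    graph.add_edge(1, 3, 2)
    graph.add_edge(2, 3, 2)
    graph.add_edge(3, 4, 1)
    dist, parent = prims_algo(graph)
    print(dist)    # best known connection weight per node
    print(parent)  # predecessor of each node in the resulting tree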
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_x_clip": [
"XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XCLIPConfig",
"XCLIPTextConfig",
"XCLIPVisionConfig",
],
"processing_x_clip": ["XCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
"XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"XCLIPModel",
"XCLIPPreTrainedModel",
"XCLIPTextModel",
"XCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
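
# Added illustration (not part of the original shim): the class behaves exactly
# like BeitImageProcessor apart from the FutureWarning, which can be checked
# like this:
#
#   import warnings
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       BeitFeatureExtractor()
#   assert any(issubclass(w.category, FutureWarning) for w in caught)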
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of exactly ``successes`` successes in ``trials``
    independent trials, each succeeding with probability ``prob``."""
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
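
# Added worked example: for binomial_distribution(2, 4, 0.75) the coefficient
# is C(4, 2) = 4! / (2! * 2!) = 6 and the probability term is
# 0.75**2 * 0.25**2 = 0.03515625, so the printed result is
# 6 * 0.03515625 = 0.2109375.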
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCamelCase_ : int = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False,
        vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
def lowerCamelCase__ ( self : Union[str, Any] , _snake_case : Dict , _snake_case : Dict , _snake_case : Optional[int] ) -> Tuple:
"""simple docstring"""
A_ = 20
A_ = model_class_name(_snake_case )
A_ = model.encode(inputs_dict["input_ids"] )
A_ , A_ = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
A_ = model.init_cache(decoder_input_ids.shape[0] , _snake_case , _snake_case )
A_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
A_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
A_ = model.decode(
decoder_input_ids[:, :-1] , _snake_case , decoder_attention_mask=_snake_case , past_key_values=_snake_case , decoder_position_ids=_snake_case , )
A_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
A_ = model.decode(
decoder_input_ids[:, -1:] , _snake_case , decoder_attention_mask=_snake_case , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_snake_case , )
A_ = model.decode(_snake_case , _snake_case )
A_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' )
def lowerCamelCase__ ( self : str , _snake_case : Union[str, Any] , _snake_case : List[Any] , _snake_case : Dict ) -> Tuple:
"""simple docstring"""
A_ = 20
A_ = model_class_name(_snake_case )
A_ = model.encode(inputs_dict["input_ids"] )
A_ , A_ = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
A_ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
A_ = model.init_cache(decoder_input_ids.shape[0] , _snake_case , _snake_case )
A_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
A_ = model.decode(
decoder_input_ids[:, :-1] , _snake_case , decoder_attention_mask=_snake_case , past_key_values=_snake_case , decoder_position_ids=_snake_case , )
A_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
A_ = model.decode(
decoder_input_ids[:, -1:] , _snake_case , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_snake_case , decoder_position_ids=_snake_case , )
A_ = model.decode(_snake_case , _snake_case , decoder_attention_mask=_snake_case )
A_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' )
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Tuple ) -> str:
"""simple docstring"""
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_snake_case , _snake_case , _snake_case )
def lowerCamelCase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_snake_case , _snake_case , _snake_case )
def lowerCamelCase__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
A_ = self._prepare_for_class(_snake_case , _snake_case )
A_ = model_class(_snake_case )
@jax.jit
def encode_jitted(_snake_case : Any , _snake_case : Tuple=None , **_snake_case : List[Any] ):
return model.encode(input_ids=_snake_case , attention_mask=_snake_case )
with self.subTest("JIT Enabled" ):
A_ = encode_jitted(**_snake_case ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
A_ = encode_jitted(**_snake_case ).to_tuple()
self.assertEqual(len(_snake_case ) , len(_snake_case ) )
for jitted_output, output in zip(_snake_case , _snake_case ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase__ ( self : List[str] ) -> int:
"""simple docstring"""
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
A_ = model_class(_snake_case )
A_ = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
A_ = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(_snake_case : List[Any] , _snake_case : Any , _snake_case : str ):
return model.decode(
decoder_input_ids=_snake_case , decoder_attention_mask=_snake_case , encoder_outputs=_snake_case , )
with self.subTest("JIT Enabled" ):
A_ = decode_jitted(**_snake_case ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
A_ = decode_jitted(**_snake_case ).to_tuple()
self.assertEqual(len(_snake_case ) , len(_snake_case ) )
for jitted_output, output in zip(_snake_case , _snake_case ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
@slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
        src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
        tgt_text = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
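
# Added illustration, not part of the test file: the generation round-trip the
# slow test above performs, reduced to its essentials.
#
#   model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
#   tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
#   batch = tokenizer(["Some news article ..."], return_tensors="np", truncation=True, max_length=512, padding=True)
#   summary_ids = model.generate(**batch, num_beams=2).sequences
#   print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))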
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
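
# Added example invocation; the script filename is an assumption, the argument
# names come from the usage above:
#
#   python initialize_model.py \
#       --config_name gpt2-large \
#       --tokenizer_name codeparrot/codeparrot \
#       --model_name codeparrot-model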
import numpy as np


def runge_kutta(f, y0, x0, h, x_end):
    """Classic fourth-order Runge-Kutta integrator for y' = f(x, y)."""
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
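
# Added usage sketch (not in the original module): integrate y' = y from
# x = 0 to x = 1 with y(0) = 1; the final value approximates e ~= 2.71828.
#
#   result = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
#   print(result[-1])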
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
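
# Added example invocation; the script filename is an assumption, the flags
# come from parse_args() above:
#
#   python retrieve.py --class_prompt "photo of a cat" \
#       --class_data_dir ./real_reg/cat --num_class_images 200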
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Return the 1-based line number in ``data_file`` whose "base,exponent"
    pair has the greatest value of base**exponent (Project Euler problem 99)."""
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
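
# Added note on the comparison trick: for a line "a,x" the code compares
# x * log10(a) rather than a**x directly. log10 is monotonic, so
# log10(a**x) = x * log10(a) preserves the ordering while avoiding
# astronomically large integers.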
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
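
# Added illustration, not part of the test file: the config round-trip that
# test_switch exercises. Any of these multistep schedulers can be rebuilt from
# another one's config.
#
#   from diffusers import DEISMultistepScheduler, UniPCMultistepScheduler
#
#   deis = DEISMultistepScheduler(num_train_timesteps=1000)
#   unipc = UniPCMultistepScheduler.from_config(deis.config)
#   deis_again = DEISMultistepScheduler.from_config(unipc.config)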
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) with Pascal's rule, using O(r) extra space."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
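
# Added note: c[] holds one row of Pascal's triangle, updated in place.
# Iterating j downwards is what keeps the in-place update correct, because
# c[j - 1] still holds the previous row's value at the moment it is added.
# For n=10, r=5 the printed value is C(10, 5) = 252.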
from pathlib import Path
import fire
from tqdm import tqdm
def _UpperCamelCase ( lowercase__="ro" , lowercase__="en" , lowercase__="wmt16" , lowercase__=None ):
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
__SCREAMING_SNAKE_CASE : int = F'''{src_lang}-{tgt_lang}'''
print(F'''Converting {dataset}-{pair}''' )
__SCREAMING_SNAKE_CASE : Optional[int] = datasets.load_dataset(lowercase__ , lowercase__ )
if save_dir is None:
__SCREAMING_SNAKE_CASE : Dict = F'''{dataset}-{pair}'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = Path(lowercase__ )
save_dir.mkdir(exist_ok=lowercase__ )
for split in ds.keys():
print(F'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
__SCREAMING_SNAKE_CASE : str = '''val''' if split == '''validation''' else split
__SCREAMING_SNAKE_CASE : str = save_dir.joinpath(F'''{fn}.source''' )
__SCREAMING_SNAKE_CASE : List[str] = save_dir.joinpath(F'''{fn}.target''' )
__SCREAMING_SNAKE_CASE : Dict = src_path.open('''w+''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__SCREAMING_SNAKE_CASE : Tuple = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(F'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
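
# Added example invocation; fire maps the function arguments to CLI flags, the
# script filename is an assumption:
#
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en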
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1_024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
"""simple docstring"""
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
if not grid or not grid[0]:
raise TypeError("""The grid does not contain the appropriate information""" )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
UpperCAmelCase__ : Optional[Any] = grid[0]
for row_n in range(1 , len(__UpperCamelCase ) ):
UpperCAmelCase__ : str = grid[row_n]
UpperCAmelCase__ : Optional[int] = fill_row(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase__ : Any = grid[row_n]
return grid[-1][-1]
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
current_row[0] += row_above[0]
for cell_n in range(1 , len(__UpperCamelCase ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
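
# Added usage sketch: for the classic 3x3 example the cheapest path through
# [[1, 3, 1], [1, 5, 1], [4, 2, 1]] is 1 -> 3 -> 1 -> 1 -> 1, so
#
#   print(min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # 7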
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
a__ = LlamaModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
a__ = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
a__ = True
a__ = LlamaModel(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , )
a__ = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , )
a__ = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
a__ = LlamaForCausalLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
a__ = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) -> Optional[int]:
    """simple docstring"""
    config.is_decoder = True
    config.add_cross_attention = True
    model = LlamaForCausalLM(config=config)
    model.to(torch_device)
    model.eval()
    # first forward pass
    outputs = model(
        input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, )
    past_key_values = outputs.past_key_values
    # create hypothetical multiple next tokens and extend to next_input_ids
    next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
    next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
    # append the new tokens to input_ids and the attention mask
    next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
    next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
    output_from_no_past = model(
        next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )['hidden_states'][0]
    output_from_past = model(
        next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )['hidden_states'][0]
    # select random slice
    random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
    output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
    output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
    self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
    # test that outputs are equal for slice
    self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
def prepare_config_and_inputs_for_common(self) -> Dict:
    """simple docstring"""
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = config_and_inputs
    inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
    return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
pipeline_model_mapping = (
{
'feature-extraction': LlamaModel,
'text-classification': LlamaForSequenceClassification,
'text-generation': LlamaForCausalLM,
'zero-shot': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
test_headmasking = False
test_pruning = False
def setUp(self) -> Any:
    """simple docstring"""
    self.model_tester = LlamaModelTester(self)
    self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=3_7)
def test_config(self) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def test_model(self) -> Optional[Any]:
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_various_embeddings(self) -> Any:
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    for type in ["absolute", "relative_key", "relative_key_query"]:
        # set the position embedding type on the config before re-running the model checks
        config_and_inputs[0].position_embedding_type = type
        self.model_tester.create_and_check_model(*config_and_inputs)
def test_llama_sequence_classification_model(self) -> int:
    """simple docstring"""
    config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
    config.num_labels = 3
    input_ids = input_dict['input_ids']
    # token id 1 is the pad token here, so ne(1) doubles as the attention mask
    attention_mask = input_ids.ne(1).to(torch_device)
    sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
    model = LlamaForSequenceClassification(config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
    self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
def test_llama_sequence_classification_model_for_single_label(self) -> Union[str, Any]:
    """simple docstring"""
    config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
    config.num_labels = 3
    config.problem_type = 'single_label_classification'
    input_ids = input_dict['input_ids']
    attention_mask = input_ids.ne(1).to(torch_device)
    sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
    model = LlamaForSequenceClassification(config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
    self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
def test_llama_sequence_classification_model_for_multi_label(self) -> int:
    """simple docstring"""
    config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
    config.num_labels = 3
    config.problem_type = 'multi_label_classification'
    input_ids = input_dict['input_ids']
    attention_mask = input_ids.ne(1).to(torch_device)
    sequence_labels = ids_tensor(
        [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
    model = LlamaForSequenceClassification(config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
    self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def test_save_load_fast_init_from_base(self) -> List[str]:
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def test_model_rope_scaling(self, scaling_type) -> Optional[int]:
    """simple docstring"""
    config, _ = self.model_tester.prepare_config_and_inputs_for_common()
    short_input = ids_tensor([1, 1_0], config.vocab_size)
    # the long input is 1.5x the trained context length, forcing RoPE to extrapolate
    long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
    set_seed(4_2)  # Fixed seed at init time so the two models get the same random weights
    original_model = LlamaModel(config)
    original_model.to(torch_device)
    original_model.eval()
    original_short_output = original_model(short_input).last_hidden_state
    original_long_output = original_model(long_input).last_hidden_state
    set_seed(4_2)  # Fixed seed at init time so the two models get the same random weights
    config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
    scaled_model = LlamaModel(config)
    scaled_model.to(torch_device)
    scaled_model.eval()
    scaled_short_output = scaled_model(short_input).last_hidden_state
    scaled_long_output = scaled_model(long_input).last_hidden_state
    # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
    # maximum sequence length, so the outputs for the short input should match.
    if scaling_type == "dynamic":
        self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
    else:
        self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
    # The output should be different for long inputs
    self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def test_model_7b_logits(self) -> Any:
    """simple docstring"""
    input_ids = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
    model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf', device_map='auto')
    out = model(torch.tensor([input_ids]))
    # Expected mean on dim = -1
    EXPECTED_MEAN = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]])
    torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
    # slicing logits[0, 0, 0:30]
    # fmt: off
    EXPECTED_SLICE = torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] )
    # fmt: on
    torch.testing.assert_close(out[0, 0, :3_0], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def test_model_13b_logits(self) -> Optional[int]:
    """simple docstring"""
    input_ids = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
    model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf', device_map='auto')
    out = model(torch.tensor(input_ids))
    # Expected mean on dim = -1
    EXPECTED_MEAN = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]])
    torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
    # slicing logits[0, 0, 0:30]
    # fmt: off
    EXPECTED_SLICE = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] )
    # fmt: on
    torch.testing.assert_close(out[0, 0, :3_0], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
@unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def test_model_13bf_logits(self) -> str:
    """simple docstring"""
    input_ids = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
    model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf', device_map='auto')
    out = model(torch.tensor(input_ids))
    # Expected mean on dim = -1
    EXPECTED_MEAN = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]])
    torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
    # slicing logits[0, 0, 0:30]
    # fmt: off
    EXPECTED_SLICE = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] )
    # fmt: on
    torch.testing.assert_close(out[0, 0, :3_0], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
@unittest.skip(
'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def test_model_70b_logits(self) -> Tuple:
    """simple docstring"""
    input_ids = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
    model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf', device_map='auto')
    out = model(torch.tensor(input_ids))
    EXPECTED_MEAN = torch.tensor(
        [[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]], dtype=torch.float32)
    torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
    # fmt: off
    EXPECTED_SLICE = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] )
    # fmt: on
    torch.testing.assert_close(out[0, 0, :3_0], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)
@unittest.skip('Model is currently gated' )
@slow
def test_model_13b_greedy_generation(self) -> List[str]:
    """simple docstring"""
    EXPECTED_TEXT_COMPLETION = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
    prompt = 'Simply put, the theory of relativity states that '
    tokenizer = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf')
    input_ids = tokenizer.encode(prompt, return_tensors='pt')
    model = LlamaForCausalLM.from_pretrained(
        'meta-llama/Llama-2-13b-chat-hf', device_map='sequential', use_safetensors=False)
    # greedy generation outputs
    generated_ids = model.generate(input_ids, max_new_tokens=6_4, top_p=None, temperature=1, do_sample=False)
    text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
    self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
| 273
| 0
|
def rank_of_matrix(matrix: list[list[int | float]]) -> int:
    """Return the rank of a matrix via Gaussian elimination.

    >>> rank_of_matrix([[1, 2], [2, 4]])
    1
    >>> rank_of_matrix([[1, 0], [0, 1]])
    2
    """
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    row = 0
    while row < rank:
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero element below to swap rows with
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                # No pivot in this column: shrink the rank, copy the last usable
                # column into the current one, and retry the same row
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
                row -= 1
        row += 1
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
| 673
|
import heapq
import sys
import numpy as np
a_ : Optional[int] = tuple[int, int]
class PriorityQueue:
    """Min-heap priority queue that also supports updating and removing items."""

    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float('inf')

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update the priority of an item already in the queue
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def consistent_heuristic(p: TPos, goal: TPos) -> float:
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_2(p: TPos, goal: TPos) -> float:
    # integer division by the (global) time variable t
    return consistent_heuristic(p, goal) // t


def heuristic_1(p: TPos, goal: TPos) -> float:
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]) -> float:
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    """Print the grid with the path taken, then exit."""
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = '*'

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = '#'

    grid[0][(n - 1)] = '-'
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = '-'
        x = back_pointer[x]
    grid[(n - 1)][0] = '-'

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=' ')
                print('<-- End position', end=' ')
            else:
                print(grid[i][j], end=' ')
        print()
    print('^')
    print('Start position')
    print()
    print('# is an obstacle')
    print('- is the path taken by algorithm')
    print('PATH TAKEN BY THE ALGORITHM IS:-')
    x = back_pointer[goal]
    while x != start:
        print(x, end=' ')
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos) -> bool:
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer, ):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float('inf')

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function))
def make_common_ground() -> list[TPos]:
    """Build the list of blocked cells shared by all heuristics."""
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
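    # large filled rectangle of obstacles in the middle of the grid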
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float('inf')}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float('inf'):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float('inf'):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, i, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer, )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float('inf'):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s, 0, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer, )
                        close_list_anchor.append(get_s)
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(_UpperCamelCase ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 673
| 1
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
"tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_gpt_neox_japanese"] = [
"GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXJapaneseForCausalLM",
"GPTNeoXJapaneseLayer",
"GPTNeoXJapaneseModel",
"GPTNeoXJapanesePreTrainedModel",
]
if TYPE_CHECKING:
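    # static type checkers resolve the real imports here; at runtime _LazyModule defers them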
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 68
|
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50003
PYTHON_CODE = 50002
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = PLBartTokenizer
rust_tokenizer_class = None
test_rust_tokenizer = False
def setUp(self):
    '''simple docstring'''
    super().setUp()
    # We have a SentencePiece fixture for testing
    tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="""base""", keep_accents=True)
    tokenizer.save_pretrained(self.tmpdirname)
def test_full_base_tokenizer(self):
    '''simple docstring'''
    tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="""base""", keep_accents=True)
    tokens = tokenizer.tokenize("""This is a test""")
    self.assertListEqual(tokens, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
    # fairseq_offset accounts for the special tokens fairseq reserves at the start of the vocab
    self.assertListEqual(
        tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],)
    tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
    self.assertListEqual(
        tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
    ids = tokenizer.convert_tokens_to_ids(tokens)
    self.assertListEqual(
        ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] ,)
    back_tokens = tokenizer.convert_ids_to_tokens(ids)
    self.assertListEqual(
        back_tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
    end = tokenizer.vocab_size
    language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
    self.assertListEqual(language_tokens, ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""])
    code = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
    input_ids = tokenizer(code).input_ids
    self.assertEqual(
        tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False), code,)
def test_full_multi_tokenizer(self):
    '''simple docstring'''
    tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="""multi""", keep_accents=True)
    tokens = tokenizer.tokenize("""This is a test""")
    self.assertListEqual(tokens, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
    self.assertListEqual(
        tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],)
    tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
    self.assertListEqual(
        tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
    ids = tokenizer.convert_tokens_to_ids(tokens)
    self.assertListEqual(
        ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] ,)
    back_tokens = tokenizer.convert_ids_to_tokens(ids)
    self.assertListEqual(
        back_tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
    end = tokenizer.vocab_size
    language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
    self.assertListEqual(
        language_tokens, ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""])
    code = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
    input_ids = tokenizer(code).input_ids
    self.assertEqual(
        tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False), code,)
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
checkpoint_name = "uclanlp/plbart-python-en_XX"
src_text = [
"def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
"def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
]
tgt_text = [
"Returns the maximum value of a b c.",
"Sums the values of a b c.",
]
expected_src_tokens = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def setUpClass(cls):
    '''simple docstring'''
    cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
        cls.checkpoint_name, language_codes="""base""", src_lang="""python""", tgt_lang="""en_XX""")
    cls.pad_token_id = 1
    return cls
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
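    # the base language-code vocabulary appends the code/language tokens right after the SentencePiece ids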
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] ,50001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] ,50002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] ,50003 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
    ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
    self.assertListEqual(self.expected_src_tokens, ids)
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
    self.assertIn(EN_CODE, self.tokenizer.all_special_ids)
    generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
    result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
    expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
    self.assertEqual(result, expected_english)
    self.assertNotIn(self.tokenizer.eos_token, result)
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
    src_text = ["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20]
    self.assertIsInstance(src_text[0], str)
    desired_max_length = 10
    ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
    self.assertEqual(ids[-2], 2)
    self.assertEqual(ids[-1], PYTHON_CODE)
    self.assertEqual(len(ids), desired_max_length)
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) ,[50004, 50001] )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
    tmpdirname = tempfile.mkdtemp()
    original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
    self.tokenizer.save_pretrained(tmpdirname)
    new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
    self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
    batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="""pt""")
    # shift_tokens_right moves the language code from the end of the labels to the first decoder position
    batch["decoder_input_ids"] = shift_tokens_right(batch["""labels"""], self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() ,[2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] ,snake_case )
self.assertEqual(batch.decoder_input_ids[1][-1] ,2 )
self.assertEqual(batch.labels[1][-2:].tolist() ,[2, EN_CODE] )
@require_torch
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
    batch = self.tokenizer(
        self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="""pt""",)
    batch["decoder_input_ids"] = shift_tokens_right(batch["""labels"""], self.tokenizer.pad_token_id)
    self.assertIsInstance(batch, BatchEncoding)
    self.assertEqual((2, 26), batch.input_ids.shape)
    self.assertEqual((2, 26), batch.attention_mask.shape)
    result = batch.input_ids.tolist()[0]
    self.assertListEqual(self.expected_src_tokens, result)
self.assertEqual(2 ,batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens ,[] )
self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id, PYTHON_CODE] )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
    batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="""pt""")
    targets = self.tokenizer(
        text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="""pt""")
    labels = targets["""input_ids"""]
    batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.decoder_input_ids.shape[1] ,10 )
@require_torch
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
    inputs = self.tokenizer._build_translation_inputs(
        """A test""", return_tensors="""pt""", src_lang="""en_XX""", tgt_lang="""java""")
    self.assertEqual(
        nested_simplify(inputs), {
# A, test, EOS, en_XX
"""input_ids""": [[150, 242, 2, 50003]],
"""attention_mask""": [[1, 1, 1, 1]],
# java
"""forced_bos_token_id""": 50001,
} ,)
| 336
| 0
|
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
def lowerCAmelCase__ ( self: str ) -> Union[str, Any]:
'''simple docstring'''
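        # download into a fresh temporary cache so we can inspect exactly which files get fetched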
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
UpperCAmelCase_ =FlaxDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=_lowerCAmelCase , cache_dir=_lowerCAmelCase )
UpperCAmelCase_ =[t[-1] for t in os.walk(os.path.join(_lowerCAmelCase , os.listdir(_lowerCAmelCase )[0] , "snapshots" ) )]
UpperCAmelCase_ =[item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(".bin" ) for f in files )
@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase):
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =FlaxStableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=_lowerCAmelCase )
UpperCAmelCase_ =(
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ =jax.random.PRNGKey(0 )
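        # the fixed PRNG key above keeps the sampled latents reproducible across runs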
UpperCAmelCase_ =4
UpperCAmelCase_ =jax.device_count()
UpperCAmelCase_ =num_samples * [prompt]
UpperCAmelCase_ =pipeline.prepare_inputs(_lowerCAmelCase )
# shard inputs and rng
UpperCAmelCase_ =replicate(_lowerCAmelCase )
UpperCAmelCase_ =jax.random.split(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =shard(_lowerCAmelCase )
UpperCAmelCase_ =pipeline(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , jit=_lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1e-3
assert np.abs(np.abs(_lowerCAmelCase , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5e-1
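        # flatten the [devices, per-device batch] leading dims before converting the arrays to PIL images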
UpperCAmelCase_ =pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_lowerCAmelCase ) == num_samples
def lowerCAmelCase__ ( self: List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=_lowerCAmelCase )
UpperCAmelCase_ =(
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ =jax.random.PRNGKey(0 )
UpperCAmelCase_ =50
UpperCAmelCase_ =jax.device_count()
UpperCAmelCase_ =num_samples * [prompt]
UpperCAmelCase_ =pipeline.prepare_inputs(_lowerCAmelCase )
# shard inputs and rng
UpperCAmelCase_ =replicate(_lowerCAmelCase )
UpperCAmelCase_ =jax.random.split(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =shard(_lowerCAmelCase )
UpperCAmelCase_ =pipeline(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , jit=_lowerCAmelCase ).images
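        # leading dims are [devices, per-device batch]; 512x512 is the full-resolution SD v1 output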
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1e-3
assert np.abs((np.abs(_lowerCAmelCase , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5e-1
def lowerCAmelCase__ ( self: int ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=_lowerCAmelCase )
UpperCAmelCase_ =(
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ =jax.random.PRNGKey(0 )
UpperCAmelCase_ =50
UpperCAmelCase_ =jax.device_count()
UpperCAmelCase_ =num_samples * [prompt]
UpperCAmelCase_ =pipeline.prepare_inputs(_lowerCAmelCase )
# shard inputs and rng
UpperCAmelCase_ =replicate(_lowerCAmelCase )
UpperCAmelCase_ =jax.random.split(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =shard(_lowerCAmelCase )
UpperCAmelCase_ =pipeline(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , jit=_lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1e-3
assert np.abs((np.abs(_lowerCAmelCase , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5e-1
def lowerCAmelCase__ ( self: Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa )
UpperCAmelCase_ =(
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ =jax.random.PRNGKey(0 )
UpperCAmelCase_ =50
UpperCAmelCase_ =jax.device_count()
UpperCAmelCase_ =num_samples * [prompt]
UpperCAmelCase_ =pipeline.prepare_inputs(_lowerCAmelCase )
# shard inputs and rng
UpperCAmelCase_ =replicate(_lowerCAmelCase )
UpperCAmelCase_ =jax.random.split(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =shard(_lowerCAmelCase )
UpperCAmelCase_ =pipeline(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , jit=_lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1e-3
assert np.abs((np.abs(_lowerCAmelCase , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5e-1
def lowerCAmelCase__ ( self: Optional[Any] ) -> str:
'''simple docstring'''
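        # build a DDIM scheduler explicitly to check the pipeline also works with a non-default scheduler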
UpperCAmelCase_ =FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , set_alpha_to_one=_lowerCAmelCase , steps_offset=1 , )
UpperCAmelCase_ , UpperCAmelCase_ =FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , )
UpperCAmelCase_ =scheduler.create_state()
UpperCAmelCase_ =scheduler_state
UpperCAmelCase_ =(
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ =jax.random.PRNGKey(0 )
UpperCAmelCase_ =50
UpperCAmelCase_ =jax.device_count()
UpperCAmelCase_ =num_samples * [prompt]
UpperCAmelCase_ =pipeline.prepare_inputs(_lowerCAmelCase )
# shard inputs and rng
UpperCAmelCase_ =replicate(_lowerCAmelCase )
UpperCAmelCase_ =jax.random.split(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =shard(_lowerCAmelCase )
UpperCAmelCase_ =pipeline(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , jit=_lowerCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1e-3
assert np.abs((np.abs(_lowerCAmelCase , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5e-1
    def test_jax_memory_efficient_attention(self) -> int:
        '''simple docstring'''
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        # one RNG key per device keeps sampling independent across the shards
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None, )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]
        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None, use_memory_efficient_attention=True, )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 550
|
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
__lowercase : Union[str, Any] =logging.get_logger(__name__)
class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ['''input_features''', '''attention_mask''']
    def __init__(self, feature_size=80, sampling_rate=1_6000, padding_value=0.0, hop_length=10, win_length=25, win_function="hamming_window", frame_signal_scale=3_27_68.0, preemphasis_coeff=0.97, mel_floor=1.0, normalize_means=True, normalize_vars=True, return_attention_mask=False, **kwargs, ) -> Dict:
        '''simple docstring'''
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        # window and hop sizes are given in milliseconds; convert them to sample counts
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000

        self.n_fft = optimal_fft_length(self.sample_size)
        # number of unique rFFT bins for a real-valued input of length n_fft
        self.n_freqs = (self.n_fft // 2) + 1
    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        '''simple docstring'''
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)
        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.feature_size, min_frequency=0.0, max_frequency=self.sampling_rate / 2.0, sampling_rate=self.sampling_rate, )
        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale, window=window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, center=False, preemphasis=self.preemphasis_coeff, mel_filters=fbanks, mel_floor=self.mel_floor, log_mel="log", )
        return msfc_features.T
    def _normalize_one(self, x, input_length, padding_value):
        '''simple docstring'''
        # per-feature mean/variance normalization computed over the unpadded frames only
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        '''simple docstring'''
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]
    def __call__(self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, truncation: bool = False, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None, **kwargs, ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
F' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
F' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
# extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]
# convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})
        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs, )
# make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            input_features = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            attention_mask = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask)
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
return padded_inputs
| 550
| 1
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
def setUp(self):
    self.tmpdirname = tempfile.mkdtemp()
    # a minimal WordPiece vocab is enough for the processor round-trip tests
    # fmt: off
    vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
    # fmt: on
    self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
    with open(self.vocab_file, '''w''', encoding='''utf-8''') as vocab_writer:
        vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))

    image_processor_map = {
        '''do_resize''': True,
        '''size''': {'''height''': 18, '''width''': 18},
        '''do_normalize''': True,
        '''image_mean''': [0.5, 0.5, 0.5],
        '''image_std''': [0.5, 0.5, 0.5],
    }
    self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
    with open(self.image_processor_file, '''w''', encoding='''utf-8''') as fp:
        json.dump(image_processor_map, fp)
def get_tokenizer(self, **kwargs):
    return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

def get_image_processor(self, **kwargs):
    return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

def tearDown(self):
    shutil.rmtree(self.tmpdirname)
def prepare_image_inputs(self):
    image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
    # PIL expects channels-last, so move the channel axis to the end before converting
    image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
    return image_inputs
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowerCamelCase_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
lowerCamelCase_ = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = image_processor(UpperCAmelCase , return_tensors='''np''' )
lowerCamelCase_ = processor(images=UpperCAmelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = processor(text=UpperCAmelCase )
lowerCamelCase_ = tokenizer(UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(UpperCAmelCase ):
processor()
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ = processor.batch_decode(UpperCAmelCase )
lowerCamelCase_ = tokenizer.batch_decode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=UpperCAmelCase , images=UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 29
|
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    """simple docstring"""

    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def is_binary_search_tree(node: TreeNode | None) -> bool:
    """simple docstring"""
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(node):
        raise ValueError(
            '''Each node should be type of TreeNode and data should be float.''')

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(node, -float('''inf'''), float('''inf'''))
if __name__ == "__main__":
import doctest
doctest.testmod()
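
# A small self-check sketch for the validator above; the trees are our own
# illustrative examples, not part of the original module:
if __name__ == "__main__":
    _valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
    assert is_binary_search_tree(_valid)

    _not_ordered = TreeNode(2.0, TreeNode(3.0), TreeNode(1.0))
    assert not is_binary_search_tree(_not_ordered)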
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Try to add the value to the bucket; return True if it fits."""
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
"""simple docstring"""
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(path, dataset_name, split, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(path, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
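
# A quick sanity sketch for the helpers above; the expected values were worked
# out by hand from the regexes and are our own illustration:
if __name__ == "__main__":
    assert camelcase_to_snakecase("SquadV2") == "squad_v2"
    assert snakecase_to_camelcase("squad_v2") == "SquadV2"
    assert filename_prefix_for_split("squad", "train") == "squad-train"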
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=10, num_channels=3, patch_size=2, num_frames=2,
        is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        num_labels=10, initializer_range=0.02, attention_type="divided_space_time", scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            num_frames=self.num_frames, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range, attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
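
# A quick sketch of the token-count arithmetic the tester above relies on,
# using its default settings (our own worked example):
#
#     num_patches_per_frame = (image_size // patch_size) ** 2   # (10 // 2) ** 2 == 25
#     seq_length = num_frames * num_patches_per_frame + 1       # 2 * 25 + 1 == 51 (incl. CLS)
#
# so last_hidden_state is expected to have shape (batch_size, 51, hidden_size).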
'''simple docstring'''
def gnome_sort(lst: list) -> list:
    if len(lst) <= 1:
        return lst

    i = 1

    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1

    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
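
# A worked trace of the sort above on a tiny input (our own illustration):
#
#     gnome_sort([3, 1, 2])
#     # i=1: 3 > 1 -> swap -> [1, 3, 2], i -> 0 -> reset to 1
#     # i=1: 1 <= 3 -> i=2; 3 > 2 -> swap -> [1, 2, 3], i -> 1
#     # i=1: 1 <= 2 -> i=2; 2 <= 3 -> i=3 -> done, returns [1, 2, 3]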
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"roberta-base": 5_12,
"roberta-large": 5_12,
"roberta-large-mnli": 5_12,
"distilroberta-base": 5_12,
"roberta-base-openai-detector": 5_12,
"roberta-large-openai-detector": 5_12,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>",
        eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>",
        mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the post-processor class
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        """simple docstring"""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        """simple docstring"""
        # Mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
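
# A minimal usage sketch for the fast tokenizer above; the checkpoint name is
# a public one, but the calls are our illustration, not part of this module:
#
#     tok = RobertaTokenizerFast.from_pretrained("roberta-base")
#     single = tok.build_inputs_with_special_tokens([10, 11])          # <s> A </s>
#     pair = tok.build_inputs_with_special_tokens([10, 11], [12])      # <s> A </s></s> B </s>
#     # create_token_type_ids_from_sequences returns all zeros in both cases,
#     # since RoBERTa does not use token type ids.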
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
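
# A sketch of how this lazy init behaves at import time (our illustration):
#
#     from transformers.models.luke import LukeConfig   # always available
#     from transformers.models.luke import LukeModel    # resolved lazily; raises if torch is missing
#
# _LazyModule only imports the modeling module when one of its names is first accessed.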
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """simple docstring"""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
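
# A usage sketch; the live output depends on the page, so the shape shown is
# only our assumption about what the scraper returns:
#
#     stats = world_covid19_stats()
#     # e.g. {"Coronavirus Cases:": "...", "Deaths:": "...", "Recovered:": "..."}
#
# Note the scraper pairs headline elements with counter divs positionally, so
# it silently breaks if Worldometer changes its CSS class names.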
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>",
        unk_token="<unk>", pad_token="<pad>", mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token,
            additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}

        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
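
# A sketch of the id arithmetic above (our own illustration): sentencepiece
# piece ids are shifted by fairseq_offset == 4, so a piece with id p maps to
# vocabulary id p + 4, ids 0-3 are reserved for the fairseq specials, and any
# piece that sentencepiece maps to 0 (its unknown id) falls back to
# unk_token_id instead of being offset.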
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine") -> torch.Tensor:
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t

    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(self, model_output, timestep, sample, return_dict=True):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
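
# A quick sketch of the cosine beta schedule above (our own illustration):
# each beta_i is 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta,
# so the cumulative product of (1 - beta_i) tracks the chosen alpha_bar curve:
#
#     betas = betas_for_alpha_bar(4)                      # 4 betas in (0, 0.999]
#     alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)  # follows the cosine curve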
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16,
        hidden_sizes=[32, 64, 128], depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0,
        qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1,
        hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02,
        layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10,
        encoder_stride=8, out_features=["stage1", "stage2"], out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            embed_dim=self.embed_dim, hidden_sizes=self.hidden_sizes, depths=self.depths,
            num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range, encoder_stride=self.encoder_stride,
            out_features=self.out_features, out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
    def test_hidden_states_output_with_padding( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
@slow
    def test_model_from_pretrained( self ):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_initialization( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class FocalNetModelIntegrationTest( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
# TODO update organization
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head( self ):
        model = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(torch_device )
        image_processor = self.default_image_processor
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.2_166, -0.4_368, 0.2_191] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
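        # Hedged note (added): 281 is assumed to be the ImageNet-1k index for
        # "tabby cat", the expected class for the COCO cats fixture image above.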
@require_torch
class FocalNetBackboneTest( BackboneTesterMixin , unittest.TestCase ):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False
    def setUp( self ):
        self.model_tester = FocalNetModelTester(self )
| 400
| 0
|
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/led-base-16384''': 16_384,
}
class LEDTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""", add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("""type""" ) )
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = """post_processor"""
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["""sep"""] = tuple(state["""sep"""] )
            if "cls" in state:
                state["""cls"""] = tuple(state["""cls"""] )
            changes_to_apply = False
            if state.get("""add_prefix_space""", add_prefix_space ) != add_prefix_space:
                state["""add_prefix_space"""] = add_prefix_space
                changes_to_apply = True
            if state.get("""trim_offsets""", trim_offsets ) != trim_offsets:
                state["""trim_offsets"""] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("""type""" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer, tokenizer_component, new_value )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token( self ):
        if self._mask_token is None:
            if self.verbose:
                logger.error("""Using mask_token, but it is not set yet.""" )
            return None
        return str(self._mask_token )
@mask_token.setter
    def mask_token( self, value ):
        value = AddedToken(value, lstrip=True, rstrip=False ) if isinstance(value, str ) else value
        self._mask_token = value
    def _batch_encode_plus( self, *args, **kwargs ):
        is_split_into_words = kwargs.get("""is_split_into_words""", False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                """to use it with pretokenized inputs.""" )
        return super()._batch_encode_plus(*args, **kwargs )
    def _encode_plus( self, *args, **kwargs ):
        is_split_into_words = kwargs.get("""is_split_into_words""", False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                """to use it with pretokenized inputs.""" )
        return super()._encode_plus(*args, **kwargs )
    def save_vocabulary( self, save_directory, filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self, token_ids_a, token_ids_b=None ):
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
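    # Hedged example (added): with the defaults above, a single sequence is laid
    # out as `<s> tokens_a </s>` and a pair as `<s> tokens_a </s> </s> tokens_b </s>`,
    # mirroring the BART-style format that LED inherits.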
    def create_token_type_ids_from_sequences( self, token_ids_a, token_ids_b = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def _pad( self, encoded_inputs, max_length = None, padding_strategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of = None, return_attention_mask = None, ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = """attention_mask""" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["""global_attention_mask"""] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs["""global_attention_mask"""] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["""global_attention_mask"""] = (
                        encoded_inputs["""global_attention_mask"""] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["""global_attention_mask"""] = [-1] * difference + encoded_inputs[
                        """global_attention_mask"""
                    ]
                else:
                    raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
        return encoded_inputs
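# Hedged usage sketch (added, not part of the original module): the `_pad` override
# above keeps `global_attention_mask` aligned with the padded `input_ids`. With
# illustrative values and padding_side == "right":
#
#   encoded = {"input_ids": [0, 9, 2], "global_attention_mask": [1, 0, 0]}
#   after padding input_ids to length 5, the mask becomes [1, 0, 0, -1, -1]
#
# `-1` (rather than 0) marks padding because 0 already means "local attention"
# in LED's global attention mask.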
| 662
|
from collections.abc import Sequence
def evaluate_poly( poly : Sequence[float] , x : float ) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly ) )
def horner( poly : Sequence[float] , x : float ) -> float:
    result = 0.0
    for coeff in reversed(poly ):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
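    # Hedged sanity check (added example, not in the original script): both
    # evaluation strategies should agree up to floating-point rounding; for this
    # poly and x the value is 5.0*10**2 + 9.3*10**3 + 7.0*10**4 = 79800.0.
    from math import isclose

    assert isclose(evaluate_poly(poly, x), horner(poly, x))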
| 17
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
"tokenization_roc_bert": ["RoCBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
"ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoCBertForCausalLM",
"RoCBertForMaskedLM",
"RoCBertForMultipleChoice",
"RoCBertForPreTraining",
"RoCBertForQuestionAnswering",
"RoCBertForSequenceClassification",
"RoCBertForTokenClassification",
"RoCBertLayer",
"RoCBertModel",
"RoCBertPreTrainedModel",
"load_tf_weights_in_roc_bert",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
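# Hedged usage note (added): with the lazy-module pattern above, importing this
# package stays cheap; heavyweight submodules load on first attribute access, e.g.
#
#   from transformers.models.roc_bert import RoCBertConfig  # no torch import yet
#   from transformers.models.roc_bert import RoCBertModel   # triggers modeling import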
| 713
|
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=64 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = self.get_config()
        return config, input_ids, input_mask, token_labels
    def get_config( self ):
"""simple docstring"""
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
    def prepare_config_and_inputs_for_decoder( self ):
        """simple docstring"""
        config , input_ids , input_mask , token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model( self , config , input_ids , input_mask ):
        """simple docstring"""
        model = GPTNeoXModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , input_mask ):
        """simple docstring"""
        config.add_cross_attention = True
        model = GPTNeoXModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self , config , input_ids , input_mask , token_labels ):
        """simple docstring"""
        model = GPTNeoXForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering( self , config , input_ids , input_mask , token_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self , config , input_ids , input_mask , token_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , input_mask , token_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , input_mask ):
        """simple docstring"""
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(input_ids , attention_mask=input_mask , use_cache=True )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask , output_hidden_states=True )
        output_from_no_past = output_from_no_past["""hidden_states"""][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )["""hidden_states"""][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice , output_from_past_slice , atol=1E-3 ) )
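        # Hedged note (added): the slice comparison above checks that incremental
        # decoding with `past_key_values` reproduces the hidden states of a full
        # forward pass over the concatenated sequence, up to a 1e-3 tolerance.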
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , input_mask , token_labels = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
"""feature-extraction""": GPTNeoXModel,
"""question-answering""": GPTNeoXForQuestionAnswering,
"""text-classification""": GPTNeoXForSequenceClassification,
"""text-generation""": GPTNeoXForCausalLM,
"""token-classification""": GPTNeoXForTokenClassification,
"""zero-shot""": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = GPTNeoXModelTester(self )
        self.config_tester = ConfigTester(self , config_class=GPTNeoXConfig , hidden_size=64 , num_attention_heads=8 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self ):
        """simple docstring"""
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config , input_ids , input_mask )
    def test_model_as_decoder( self ):
        """simple docstring"""
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config , input_ids , input_mask )
    def test_model_as_decoder_with_default_input_mask( self ):
        """simple docstring"""
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config , input_ids , input_mask )
    def test_decoder_model_past_large_inputs( self ):
        """simple docstring"""
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config , input_ids , input_mask )
    def test_model_for_causal_lm( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs )
    def test_model_for_question_answering( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_model_for_sequence_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_model_for_token_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
    def test_feed_forward_chunking( self ):
"""simple docstring"""
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
    def test_model_rope_scaling( self , scaling_type ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] , config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"""type""": scaling_type, """factor""": 10.0}
        scaled_model = GPTNeoXModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1E-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1E-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1E-5 ) )
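        # Hedged configuration sketch (added): the scaling exercised above is driven
        # by a config dict, e.g. `config.rope_scaling = {"type": "linear", "factor": 10.0}`;
        # a factor of 10.0 stretches the usable context roughly tenfold at the cost
        # of interpolating rotary positions.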
@require_torch
class GPTNeoXLanguageGenerationTest( unittest.TestCase ):
@slow
    def test_lm_generate_gptneox( self ):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device )
            inputs = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(torch_device )
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"""
            output_ids = model.generate(**inputs , do_sample=False , max_new_tokens=20 )
            output_str = tokenizer.batch_decode(output_ids )[0]
            self.assertEqual(output_str , expected_output )
| 163
| 0
|
from math import sqrt
def solution( limit: int = 1_000_000 ) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
return max_cuboid_size
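# Hedged worked example (added): for a fixed longest side M = max_cuboid_size and a
# fixed s = sum_shortest_sides, every cuboid a x b x M with a + b == s and
# 1 <= a <= b <= M shares the same shortest surface path sqrt(s**2 + M**2), so the
# whole family is counted at once via min(M, s // 2) - max(1, s - M) + 1.
# E.g. M = 6, s = 8: sqrt(64 + 36) = 10 is integral, and min(6, 4) - max(1, 2) + 1 = 3
# counts the cuboids 2x6x6, 3x5x6 and 4x4x6.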
if __name__ == "__main__":
print(f"""{solution() = }""")
| 37
|
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester :
"""simple docstring"""
    def __init__( self , parent , vocab_size=100 , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=4 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , scope=None , out_indices=[0, 1, 2, 3] , ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config( self ):
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
    def create_and_check_model( self , config , pixel_values , labels , pixel_labels ):
        model = BeitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , pixel_values , labels , pixel_labels ):
        model = BeitForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels , pixel_labels ):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels , pixel_labels ):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
        result = model(pixel_values , labels=pixel_labels )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': BeitModel,
'image-classification': BeitForImageClassification,
'image-segmentation': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = BeitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BeitConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
    def test_inputs_embeds( self ):
        pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
    def test_multi_gpu_data_parallel_forward( self ):
        pass
    def test_model_common_attributes( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_for_semantic_segmentation( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
    def test_training( self ):
        if not self.model_tester.is_training:
            return
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING ), BeitForMaskedImageModeling]:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING ), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config )
            model.gradient_checkpointing_enable()
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_initialization( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
    def test_model_from_pretrained( self ):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self ):
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def _UpperCamelCase( self : str ):
a__ : int = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(lowerCamelCase__ )
a__ : Optional[Any] = self.default_image_processor
a__ : Dict = prepare_img()
a__ : Optional[int] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).pixel_values.to(lowerCamelCase__ )
# prepare bool_masked_pos
a__ : Optional[Any] = torch.ones((1, 196) , dtype=torch.bool ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
a__ : Any = model(pixel_values=lowerCamelCase__ , bool_masked_pos=lowerCamelCase__ )
a__ : Tuple = outputs.logits
# verify the logits
a__ : List[str] = torch.Size((1, 196, 8_192) )
self.assertEqual(logits.shape , lowerCamelCase__ )
a__ : Optional[int] = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , lowerCamelCase__ , atol=1E-2 ) )
@slow
def _UpperCamelCase( self : Dict ):
a__ : str = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(lowerCamelCase__ )
a__ : int = self.default_image_processor
a__ : List[Any] = prepare_img()
a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
a__ : Union[str, Any] = model(**lowerCamelCase__ )
a__ : List[str] = outputs.logits
# verify the logits
a__ : Union[str, Any] = torch.Size((1, 1_000) )
self.assertEqual(logits.shape , lowerCamelCase__ )
a__ : int = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
a__ : Tuple = 281
self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ )
@slow
def _UpperCamelCase( self : Any ):
a__ : Dict = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
lowerCamelCase__ )
a__ : str = self.default_image_processor
a__ : List[str] = prepare_img()
a__ : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
a__ : Dict = model(**lowerCamelCase__ )
a__ : List[str] = outputs.logits
# verify the logits
a__ : Optional[int] = torch.Size((1, 21_841) )
self.assertEqual(logits.shape , lowerCamelCase__ )
a__ : Optional[Any] = torch.tensor([1.6881, -0.2787, 0.5901] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
a__ : Optional[Any] = 2_396
self.assertEqual(logits.argmax(-1 ).item() , lowerCamelCase__ )
@slow
def _UpperCamelCase( self : int ):
a__ : Optional[Any] = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
a__ : Tuple = model.to(lowerCamelCase__ )
a__ : List[Any] = BeitImageProcessor(do_resize=lowerCamelCase__ , size=640 , do_center_crop=lowerCamelCase__ )
a__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
a__ : Union[str, Any] = Image.open(ds[0]["file"] )
a__ : List[Any] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
a__ : Optional[Any] = model(**lowerCamelCase__ )
a__ : List[str] = outputs.logits
# verify the logits
a__ : Tuple = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , lowerCamelCase__ )
a__ : int = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
a__ : Dict = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=lowerCamelCase__ , )
else:
a__ : Dict = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=lowerCamelCase__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
@slow
    def test_post_processing_semantic_segmentation( self ):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
        model = model.to(torch_device )
        image_processor = BeitImageProcessor(do_resize=True , size=640 , do_center_crop=False )
        ds = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
        image = Image.open(ds[0]["file"] )
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs , target_sizes=[(500, 300)] )
        expected_shape = torch.Size((500, 300) )
        self.assertEqual(segmentation[0].shape , expected_shape )
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
        expected_shape = torch.Size((160, 160) )
        self.assertEqual(segmentation[0].shape , expected_shape )
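        # Hedged usage note (added): `post_process_semantic_segmentation` returns one
        # (H, W) label map per image; passing `target_sizes` resizes each map back to
        # the requested resolution, as the two shape checks above demonstrate.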
| 37
| 1
|
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
"""simple docstring"""
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        """simple docstring"""
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids)
        result = model(input_ids , token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        """simple docstring"""
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        """simple docstring"""
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class NystromformerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": NystromformerModel,
"fill-mask": NystromformerForMaskedLM,
"question-answering": NystromformerForQuestionAnswering,
"text-classification": NystromformerForSequenceClassification,
"token-classification": NystromformerForTokenClassification,
"zero-shot": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_headmasking = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self , config_class=NystromformerConfig , hidden_size=3_7)
    def test_config( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_no_head( self ):
        """simple docstring"""
        model = NystromformerModel.from_pretrained('uw-madison/nystromformer-512')
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 6, 7_6_8))
        self.assertEqual(output.shape , expected_shape)
        expected_slice = torch.tensor(
            [[[-0.45_32, -0.09_36, 0.51_37], [-0.26_76, 0.06_28, 0.61_86], [-0.36_29, -0.17_26, 0.47_16]]])
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4))
@slow
    def test_masked_lm_end_to_end( self ):
        """simple docstring"""
        sentence = 'the [MASK] of Belgium is Brussels'
        tokenizer = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512')
        model = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512')
        encoding = tokenizer(sentence , return_tensors='pt')
        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits
        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction) , 'capital')
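        # Hedged note (added): index 2 in `token_logits[:, 2, :]` is assumed to be
        # the position of `[MASK]` once the tokenizer prepends its start token to
        # 'the [MASK] of Belgium is Brussels'.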
| 715
|
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = """\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
_DESCRIPTION = """\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""
_KWARGS_DESCRIPTION = """
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"precision\": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'precision@10': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
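# Illustrative sketch (not from the original file): with toy orthogonal sentence
# vectors, each English query's nearest Indic vector after mean centering is its
# own index, so the retrieval precision@10 comes out as 1.0:
#
#     en = np.eye(12, dtype=np.float32)
#     ind = np.eye(12, dtype=np.float32)
#     assert precision_at_10(en, ind) == 1.0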
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]')
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64')
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32')),
'references': datasets.Value('int64')
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32')),
}) , codebase_urls=[] , reference_urls=[] , format='numpy' if self.config_name != 'cvit-mkb-clsr' else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
| 610
| 0
|
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """
    Create a beta schedule that discretizes the given alpha_bar function, which defines the
    cumulative product of (1 - beta) over time from t = [0, 1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
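# Quick sanity sketch (illustrative, not part of the scheduler): the cosine
# schedule produces small betas early and large ones late, capped at max_beta:
#
#     betas = betas_for_alpha_bar(10)
#     assert betas[0] < betas[-1] <= 0.999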
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    """
    Scheduler used by the UnCLIP pipelines; only the 'squaredcos_cap_v2' beta schedule is supported.
    """

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None):
        """No input scaling is needed for this scheduler; the sample is returned unchanged."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """
        Sets the discrete timesteps used for the diffusion chain (to be run before inference).
        The step ratio is chosen so that the final inference step lands exactly on train timestep 0.
        """
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ):
        """Predict the sample at the previous timestep by reversing the diffusion process."""
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ):
        # Make sure alphas_cumprod and timesteps have the same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
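# Minimal usage sketch (illustrative only): a few denoising steps with random
# tensors standing in for a real UnCLIP model's output.
#
#     scheduler = UnCLIPScheduler()
#     scheduler.set_timesteps(25)
#     sample = torch.randn(1, 3, 64, 64)
#     for t in scheduler.timesteps:
#         model_output = torch.randn(1, 3, 64, 64)  # stand-in for the model call
#         sample = scheduler.step(model_output, t, sample).prev_sample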
| 74
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 101
| 0
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key(k: str) -> str:
    """Apply the PATTERNS substitutions to map a TF variable name to its HF state-dict key."""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
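# Illustrative example (not in the original script) of how a TF variable name is
# rewritten by the PATTERNS table above:
#
#     rename_state_dict_key("encoder/ffn.dense.kernel")
#     # -> "encoder.fc1.weight"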
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 287
|
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    pass
| 287
| 1
|
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)
class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file=None, *args) -> None:
        self._accelerate_config_file = accelerate_config_file
    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 602
|
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_DESCRIPTION = "\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
_KWARGS_DESCRIPTION = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n    sources: list of source sentences where each sentence should be a string.\n    predictions: list of predicted sentences where each sentence should be a string.\n    references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n    sari: sari score\n    sacrebleu: sacrebleu score\n    exact: exact score\n\nExamples:\n    >>> sources=[\"About 95 species are currently accepted .\"]\n    >>> predictions=[\"About 95 you now get in .\"]\n    >>> references=[[\"About 95 species are currently known .\"]]\n    >>> wiki_split = datasets.load_metric(\"wiki_split\")\n    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n    >>> print(results)\n    {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
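# Tiny illustration (not part of the metric): exact match ignores case,
# punctuation and articles, so this scores 100.0:
#
#     compute_em(predictions=["The cat sat."], references=[["the cat sat"]])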
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
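# Reading aid (illustrative, not from the original file): SARI averages the
# keep/delete/add scores over 1- to 4-grams of source, prediction and
# references, then averages the three components. On the docstring example:
#
#     SARIsent("about 95 species are currently accepted .",
#              "about 95 you now get in .",
#              ["about 95 species are currently known ."])
#     # ≈ 0.218, i.e. the 21.8 reported above before the x100 scaling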
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
| 602
| 1
|
def exchange_sort(numbers: list) -> list:
    """Sort a list in place by repeatedly exchanging out-of-order pairs."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
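# Example (illustrative):
#
#     exchange_sort([4, 1, 3, 2])  # -> [1, 2, 3, 4]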
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
| 715
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
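# Typical invocations this entry point dispatches (illustrative):
#
#     accelerate config              # interactive configuration
#     accelerate env                 # print environment info
#     accelerate launch train.py ... # run a script with the configured setup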
| 37
| 0
|
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved state dict to fp16 in place (or write it to ``save_path``)."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path

    torch.save(state_dict, save_path)
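# Example usage (illustrative): via the fire CLI below or directly from Python.
#
#     python this_script.py pytorch_model.bin
#
#     convert("pytorch_model.bin", save_path="pytorch_model.fp16.bin")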
if __name__ == "__main__":
fire.Fire(convert)
| 46
|
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved state dict to fp16 in place (or write it to ``save_path``)."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path

    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
| 46
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


# NOTE: the model-specific class name was not recoverable from this dump;
# "ImageProcessor" below is an assumed placeholder for a standard
# 256 -> 224 ImageNet-style processor.
class ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
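# Minimal usage sketch (illustrative, using the placeholder class name above):
# preprocess one image into model inputs.
#
#     from PIL import Image
#     processor = ImageProcessor()
#     pixel_values = processor(Image.open("cat.jpg"), return_tensors="pt").pixel_values
#     # shape: (1, 3, 224, 224)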
| 716
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
    expected_text = [
        "California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
        " reduce the risk of wildfires.",
        'N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.',
    ]  # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_xsum(self):
        self._assert_generated_batch_equal_expected()
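# A hedged usage sketch (not part of the test suite above): exercising the same
# "google/pegasus-xsum" checkpoint through the high-level TF auto classes.
# Assumes TensorFlow plus Hub access; the input string below is illustrative.
def _pegasus_xsum_demo():
    tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
    model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")
    batch = tokenizer(["PG&E scheduled the blackouts in response to forecasts for high winds."], return_tensors="tf", padding=True)
    summary_ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
    return tokenizer.batch_decode(summary_ids.numpy(), skip_special_tokens=True)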
| 393
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
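# A hedged usage sketch: instantiating the tool above directly and classifying a
# sentence against a label set; the call path is encode -> forward -> decode.
# The example text and labels are illustrative only.
def _text_classifier_demo():
    classifier = TextClassificationTool()
    return classifier("This is a super nice API!", labels=["positive", "negative"])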
| 499
|
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
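# A hedged sketch of how a slow/fast tokenizer pair like this is typically
# registered with the Auto classes. `CustomConfig` is a hypothetical sibling
# module, not something defined in this file, so the snippet stays commented:
# from transformers import AutoTokenizer
# from .custom_configuration import CustomConfig  # hypothetical
# AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast)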
| 59
| 0
|
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Finds where `function` becomes 0 in [a, b] using the bisection method."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
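    # Optional cross-check, a sketch assuming SciPy is installed: its bracketing
    # root finder should agree with bisection() above to within ~1e-6.
    try:
        from scipy.optimize import bisect

        assert abs(bisect(f, 1, 1000) - bisection(f, 1, 1000)) < 1e-6
    except ImportError:
        pass  # SciPy is an optional cross-check only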
| 717
|
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("""importlib_metadata""")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
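if __name__ == "__main__":
    # Hedged demo: verify a single pinned dependency on demand. require_version
    # raises with the hint below if the installed numpy misses the recorded pin.
    dep_version_check("numpy", hint="pip install -U numpy")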
| 550
| 0
|
"""simple docstring"""
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Hubble parameter H(z), in the same units as the given Hubble constant."""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
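    # A second illustrative call with the same hedged demo values, evaluating
    # the Friedmann relation H(z) = H0 * sqrt(Or(1+z)^4 + Om(1+z)^3 + Ok(1+z)^2 + OL)
    # at redshift z = 1 instead of today:
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=1,
        )
    )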
| 609
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?")
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?")
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.")

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # temporarily widen the feature extractor to the mel-bin count
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
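# A hedged usage sketch (outside the class): pairing text inputs with audio
# targets for TTS-style training batches. The "microsoft/speecht5_tts"
# checkpoint and the silent placeholder waveform are illustrative assumptions.
def _speecht5_processor_demo():
    import numpy as np

    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
    waveform = np.zeros(16000, dtype=np.float32)  # one second of silence as a stand-in target
    return processor(text="Hello, world!", audio_target=waveform, sampling_rate=16000, return_tensors="pt")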
| 69
| 0
|
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
"linear": PIL.Image.Resampling.BILINEAR,
"bilinear": PIL.Image.Resampling.BILINEAR,
"bicubic": PIL.Image.Resampling.BICUBIC,
"lanczos": PIL.Image.Resampling.LANCZOS,
"nearest": PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
"linear": PIL.Image.LINEAR,
"bilinear": PIL.Image.BILINEAR,
"bicubic": PIL.Image.BICUBIC,
"lanczos": PIL.Image.LANCZOS,
"nearest": PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Converts a torch tensor in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Converts a numpy image batch in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
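# Hedged demo (assumes torch is installed, which this module does not import
# itself): converting a random batch of model-style outputs in [-1, 1] into
# PIL images with the helpers above.
def _pil_utils_demo():
    import torch

    images = torch.rand(2, 3, 64, 64) * 2 - 1  # stand-in for decoded latents
    return pt_to_pil(images)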
| 294
|
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between two (torch or numpy) vectors."""
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2


def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
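# Quick sanity sketch for slerp() above (illustrative only): interpolating two
# orthogonal unit vectors halfway should land on another unit vector.
def _slerp_sanity_check():
    v0 = np.array([1.0, 0.0])
    v1 = np.array([0.0, 1.0])
    mid = slerp(0.5, v0, v1)
    assert abs(np.linalg.norm(mid) - 1.0) < 1e-6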
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
def UpperCAmelCase(self : List[Any] , _A : Dict , _A : List[Any] , _A : Dict , _A : Optional[Any] , _A : List[Any] , _A : List[Any]=None ) -> str:
if not isinstance(_A , torch.Tensor ):
raise ValueError(f'`image` has to be of type `torch.Tensor` but is {type(_A )}' )
snake_case = image.to(device=_A , dtype=_A )
if isinstance(_A , _A ):
snake_case = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_A )
]
snake_case = torch.cat(_A , dim=0 )
else:
snake_case = self.vae.encode(_A ).latent_dist.sample(_A )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
snake_case = 0.1_82_15 * init_latents
snake_case = init_latents.repeat_interleave(_A , dim=0 )
snake_case = randn_tensor(init_latents.shape , generator=_A , device=_A , dtype=_A )
# get latents
snake_case = self.scheduler.add_noise(_A , _A , _A )
snake_case = init_latents
return latents
def UpperCAmelCase(self : Dict , _A : Optional[int] ) -> Union[str, Any]:
snake_case = self.coca_transform(_A ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
snake_case = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
snake_case = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("<end_of_text>" )[0].replace("<start_of_text>" , "" ).rstrip(" .," )
def UpperCAmelCase(self : int , _A : Union[str, Any] , _A : Optional[Any] ) -> Tuple:
snake_case = self.feature_extractor.preprocess(_A )
snake_case = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
snake_case = self.clip_model.get_image_features(_A )
snake_case = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_A )
snake_case = image_embeddings_clip.repeat_interleave(_A , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def UpperCAmelCase(self : Union[str, Any] , _A : Tuple , _A : str , _A : Tuple , _A : Optional[Any] , _A : List[str] , _A : Any , _A : str , ) -> List[Any]:
snake_case = latents.detach().requires_grad_()
snake_case = self.scheduler.scale_model_input(_A , _A )
# predict the noise residual
snake_case = self.unet(_A , _A , encoder_hidden_states=_A ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
snake_case = self.scheduler.alphas_cumprod[timestep]
snake_case = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
snake_case = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
snake_case = torch.sqrt(_A )
snake_case = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , _A ):
snake_case = self.scheduler.sigmas[index]
snake_case = latents - sigma * noise_pred
else:
raise ValueError(f'scheduler type {type(self.scheduler )} not supported' )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
snake_case = 1 / 0.1_82_15 * sample
snake_case = self.vae.decode(_A ).sample
snake_case = (image / 2 + 0.5).clamp(0 , 1 )
snake_case = transforms.Resize(self.feature_extractor_size )(_A )
snake_case = self.normalize(_A ).to(latents.dtype )
snake_case = self.clip_model.get_image_features(_A )
snake_case = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_A )
snake_case = spherical_dist_loss(_A , _A ).mean() * clip_guidance_scale
snake_case = -torch.autograd.grad(_A , _A )[0]
if isinstance(self.scheduler , _A ):
snake_case = latents.detach() + grads * (sigma**2)
snake_case = noise_pred_original
else:
snake_case = noise_pred_original - torch.sqrt(_A ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__(self : str , _A : Union[torch.FloatTensor, PIL.Image.Image] , _A : Union[torch.FloatTensor, PIL.Image.Image] , _A : Optional[str] = None , _A : Optional[str] = None , _A : Optional[int] = 5_1_2 , _A : Optional[int] = 5_1_2 , _A : float = 0.6 , _A : Optional[int] = 5_0 , _A : Optional[float] = 7.5 , _A : Optional[int] = 1 , _A : float = 0.0 , _A : Optional[float] = 1_0_0 , _A : Optional[torch.Generator] = None , _A : Optional[str] = "pil" , _A : bool = True , _A : float = 0.8 , _A : float = 0.1 , _A : float = 0.1 , ) -> Any:
if isinstance(_A , _A ) and len(_A ) != batch_size:
raise ValueError(f'You have passed {batch_size} batch_size, but only {len(_A )} generators.' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if isinstance(_A , torch.Generator ) and batch_size > 1:
snake_case = [generator] + [None] * (batch_size - 1)
snake_case = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
snake_case = [x[0] for x in coca_is_none if x[1]]
snake_case = ", ".join(_A )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_A ):
raise ValueError(
f'Content prompt is None and CoCa [{coca_is_none_str}] is None.'
f'Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
snake_case = self.get_image_description(_A )
if style_prompt is None:
if len(_A ):
raise ValueError(
f'Style prompt is None and CoCa [{coca_is_none_str}] is None.'
f' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
snake_case = self.get_image_description(_A )
# get prompt text embeddings for content and style
snake_case = self.tokenizer(
_A , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=_A , return_tensors="pt" , )
snake_case = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
snake_case = self.tokenizer(
_A , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=_A , return_tensors="pt" , )
snake_case = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
snake_case = slerp(_A , _A , _A )
# duplicate text embeddings for each generation per prompt
snake_case = text_embeddings.repeat_interleave(_A , dim=0 )
# set timesteps
snake_case = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
snake_case = {}
if accepts_offset:
snake_case = 1
self.scheduler.set_timesteps(_A , **_A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
snake_case , snake_case = self.get_timesteps(_A , _A , self.device )
snake_case = timesteps[:1].repeat(_A )
# Preprocess image
snake_case = preprocess(_A , _A , _A )
snake_case = self.prepare_latents(
_A , _A , _A , text_embeddings.dtype , self.device , _A )
snake_case = preprocess(_A , _A , _A )
snake_case = self.prepare_latents(
_A , _A , _A , text_embeddings.dtype , self.device , _A )
snake_case = slerp(_A , _A , _A )
if clip_guidance_scale > 0:
snake_case = self.get_clip_image_embeddings(_A , _A )
snake_case = self.get_clip_image_embeddings(_A , _A )
snake_case = slerp(
_A , _A , _A )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
snake_case = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
snake_case = content_text_input.input_ids.shape[-1]
snake_case = self.tokenizer([""] , padding="max_length" , max_length=_A , return_tensors="pt" )
snake_case = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
snake_case = uncond_embeddings.repeat_interleave(_A , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
snake_case = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
snake_case = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
snake_case = torch.randn(_A , generator=_A , device="cpu" , dtype=_A ).to(
self.device )
else:
snake_case = torch.randn(_A , generator=_A , device=self.device , dtype=_A )
else:
if latents.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
snake_case = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
snake_case = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
snake_case = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
snake_case = {}
if accepts_eta:
snake_case = eta
# check if the scheduler accepts generator
snake_case = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
snake_case = generator
with self.progress_bar(total=_A ):
for i, t in enumerate(_A ):
# expand the latents if we are doing classifier free guidance
snake_case = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case = self.scheduler.scale_model_input(_A , _A )
# predict the noise residual
snake_case = self.unet(_A , _A , encoder_hidden_states=_A ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
snake_case , snake_case = noise_pred.chunk(2 )
snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
snake_case = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
snake_case , snake_case = self.cond_fn(
_A , _A , _A , _A , _A , _A , _A , )
# compute the previous noisy sample x_t -> x_t-1
snake_case = self.scheduler.step(_A , _A , _A , **_A ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
snake_case = 1 / 0.1_82_15 * latents
snake_case = self.vae.decode(_A ).sample
snake_case = (image / 2 + 0.5).clamp(0 , 1 )
snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case = self.numpy_to_pil(_A )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=_A , nsfw_content_detected=_A )
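# A hedged usage sketch for the pipeline above (a CLIP-guided image-mixing
# variant of Stable Diffusion). Component loading and argument names are
# assumptions, so the snippet stays commented:
#
# pipe = CLIPGuidedImagesMixingStableDiffusion(
#     vae=vae, text_encoder=text_encoder, clip_model=clip_model, tokenizer=tokenizer,
#     unet=unet, scheduler=scheduler, feature_extractor=feature_extractor,
# ).to("cuda")
# result = pipe(content_image, style_image, content_prompt="a photo", style_prompt="a painting")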
| 294
| 1
|
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def A_ ( self : Dict, _UpperCAmelCase : str, _UpperCAmelCase : List[str], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Any, _UpperCAmelCase : Dict, _UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = NezhaModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, token_type_ids=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = model(_UpperCAmelCase, token_type_ids=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def A_ ( self : int, _UpperCAmelCase : List[Any], _UpperCAmelCase : Any, _UpperCAmelCase : Optional[Any], _UpperCAmelCase : List[str], _UpperCAmelCase : List[Any], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : List[str], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Dict, ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : str = NezhaModel(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : Any = model(
_UpperCAmelCase, attention_mask=_UpperCAmelCase, token_type_ids=_UpperCAmelCase, encoder_hidden_states=_UpperCAmelCase, encoder_attention_mask=_UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : str = model(
_UpperCAmelCase, attention_mask=_UpperCAmelCase, token_type_ids=_UpperCAmelCase, encoder_hidden_states=_UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : Dict = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, token_type_ids=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def A_ ( self : List[Any], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Dict, _UpperCAmelCase : List[str], _UpperCAmelCase : Tuple, _UpperCAmelCase : Optional[Any], _UpperCAmelCase : str, _UpperCAmelCase : Optional[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = NezhaForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : Tuple = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, token_type_ids=_UpperCAmelCase, labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self : List[str], _UpperCAmelCase : List[str], _UpperCAmelCase : Any, _UpperCAmelCase : int, _UpperCAmelCase : str, _UpperCAmelCase : Optional[int], _UpperCAmelCase : Dict, _UpperCAmelCase : Optional[Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = NezhaForNextSentencePrediction(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] = model(
_UpperCAmelCase, attention_mask=_UpperCAmelCase, token_type_ids=_UpperCAmelCase, labels=_UpperCAmelCase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, 2) )
def A_ ( self : str, _UpperCAmelCase : int, _UpperCAmelCase : List[str], _UpperCAmelCase : str, _UpperCAmelCase : List[str], _UpperCAmelCase : int, _UpperCAmelCase : str, _UpperCAmelCase : Any ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = NezhaForPreTraining(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(
_UpperCAmelCase, attention_mask=_UpperCAmelCase, token_type_ids=_UpperCAmelCase, labels=_UpperCAmelCase, next_sentence_label=_UpperCAmelCase, )
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2) )
def A_ ( self : Any, _UpperCAmelCase : Dict, _UpperCAmelCase : Optional[int], _UpperCAmelCase : str, _UpperCAmelCase : Optional[int], _UpperCAmelCase : List[str], _UpperCAmelCase : int, _UpperCAmelCase : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = NezhaForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(
_UpperCAmelCase, attention_mask=_UpperCAmelCase, token_type_ids=_UpperCAmelCase, start_positions=_UpperCAmelCase, end_positions=_UpperCAmelCase, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def A_ ( self : int, _UpperCAmelCase : Optional[Any], _UpperCAmelCase : List[str], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : List[Any], _UpperCAmelCase : str, _UpperCAmelCase : Any, _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.num_labels
SCREAMING_SNAKE_CASE__ : Any = NezhaForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : Any = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, token_type_ids=_UpperCAmelCase, labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def A_ ( self : Optional[int], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : List[Any], _UpperCAmelCase : Dict, _UpperCAmelCase : List[str], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : List[Any], _UpperCAmelCase : Optional[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.num_labels
SCREAMING_SNAKE_CASE__ : List[str] = NezhaForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : int = model(_UpperCAmelCase, attention_mask=_UpperCAmelCase, token_type_ids=_UpperCAmelCase, labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self : Tuple, _UpperCAmelCase : List[Any], _UpperCAmelCase : int, _UpperCAmelCase : List[Any], _UpperCAmelCase : str, _UpperCAmelCase : List[str], _UpperCAmelCase : Tuple, _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.num_choices
SCREAMING_SNAKE_CASE__ : str = NezhaForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : str = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Tuple = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
SCREAMING_SNAKE_CASE__ : Dict = model(
_UpperCAmelCase, attention_mask=_UpperCAmelCase, token_type_ids=_UpperCAmelCase, labels=_UpperCAmelCase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": NezhaModel,
"fill-mask": NezhaForMaskedLM,
"question-answering": NezhaForQuestionAnswering,
"text-classification": NezhaForSequenceClassification,
"token-classification": NezhaForTokenClassification,
"zero-shot": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@slow
@require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
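# Hedged sketch: the checkpoint exercised above can also be driven through the
# task-level pipeline API (assumes Hub access; the call below is illustrative):
# from transformers import pipeline
# unmasker = pipeline("fill-mask", model="sijunhe/nezha-cn-base")
# unmasker("巴黎是[MASK]国的首都。")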
| 663
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}


class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
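# Hedged usage sketch (assumes the vinai/bartpho-syllable files are reachable
# on the Hub; the Vietnamese example sentence is illustrative):
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
# print(tokenizer("Chúng tôi là những nghiên cứu viên.").input_ids)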
| 663
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for the attention layers is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
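    # For example: attention_types=[[["global", "local"], 12]] expands to
    # ["global", "local"] repeated 12 times, i.e. 24 alternating entries, one per
    # layer, which is why len(config.attention_layers) must equal config.num_layers.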
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)
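# Sanity check (sketch): custom_unfold matches torch.Tensor.unfold, e.g.
# custom_unfold(torch.arange(10), dimension=0, size=3, step=2) equals
# torch.arange(10).unfold(0, 3, 2).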
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation to enable the export to ONNX."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
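# For example: seq_length=torch.tensor(512) with window_size=256 yields
# block_length=128 and num_blocks=4, since 128 is the largest divisor of 512
# below 256.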
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs
    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
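# A minimal export sketch (assumed workflow; it relies on the legacy
# `transformers.onnx.export` helper, so treat the call below as illustrative):
#
#   from pathlib import Path
#   from transformers import AutoTokenizer, GPTNeoModel
#   from transformers.onnx import export
#
#   model = GPTNeoModel.from_pretrained("EleutherAI/gpt-neo-1.3B")
#   tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")
#   onnx_config = GPTNeoOnnxConfig(model.config)
#   export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("gpt_neo.onnx"))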
| 398
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
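# Note: with the lazy structure above, `from transformers.models.fnet import FNetModel`
# only triggers the import of `modeling_fnet` (and therefore torch) on first access,
# while type checkers instead see the eager imports in the TYPE_CHECKING branch.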
| 398
| 1
|
'''simple docstring'''
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """Expected number of distinct colours among `num_picked` balls drawn from the urn."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
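# By linearity of expectation, each of the 7 colours is absent from the 20 drawn
# balls with probability C(60, 20) / C(70, 20), so the expected number of distinct
# colours is 7 * (1 - C(60, 20) / C(70, 20)), which is exactly what `solution` computes.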
| 92
|
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    """simple docstring"""
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups: Iterator[Tuple[str, List[str]]] = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms: List[str] = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary: List[List[float]] = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    """simple docstring"""
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    """simple docstring"""
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    """simple docstring"""
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """simple docstring"""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    """simple docstring"""
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
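# A minimal usage sketch (assumed, not part of the original module): build a
# single-residue `Protein` with zeroed coordinates and render it with `to_pdb`.
if __name__ == "__main__":
    _num_res = 1
    _prot = Protein(
        atom_positions=np.zeros((_num_res, residue_constants.atom_type_num, 3)),
        aatype=np.zeros((_num_res,), dtype=np.int32),
        atom_mask=np.ones((_num_res, residue_constants.atom_type_num)),
        residue_index=np.arange(1, _num_res + 1),
        b_factors=np.zeros((_num_res, residue_constants.atom_type_num)),
    )
    print(to_pdb(_prot))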
| 670
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
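# A minimal instantiation sketch (illustrative sizes only, not a released
# checkpoint's configuration):
#
#   config = Speech2TextConfig(vocab_size=100, d_model=32, encoder_layers=2,
#                              decoder_layers=2, encoder_attention_heads=2,
#                              decoder_attention_heads=2)
#
# Note the guard above: with the default conv_kernel_sizes=(5, 5), passing
# num_conv_layers=3 would raise a ValueError.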
| 272
|
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "
HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"

TO_HIGHLIGHT = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r'tfds\.core', r'datasets'),
(r'tf\.io\.gfile\.GFile', r'open'),
(r'tf\.([\w\d]+)', r'datasets.Value(\'\1\')'),
(r'tfds\.features\.Text\(\)', r'datasets.Value(\'string\')'),
(r'tfds\.features\.Text\(', r'datasets.Value(\'string\'),'),
(r'features\s*=\s*tfds.features.FeaturesDict\(', r'features=datasets.Features('),
(r'tfds\.features\.FeaturesDict\(', r'dict('),
(r'The TensorFlow Datasets Authors', r'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(r'tfds\.', r'datasets.'),
(r'dl_manager\.manual_dir', r'self.config.data_dir'),
(r'self\.builder_config', r'self.config'),
]
def convert_command_factory(args):
    """simple docstring"""
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser):
        """simple docstring"""
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        """simple docstring"""
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        """simple docstring"""
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
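# Usage sketch (illustrative paths):
#
#   datasets-cli convert --tfds_path ./tfds/my_dataset.py --datasets_directory ./hf_datasets/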
| 272
| 1
|
"""simple docstring"""
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    """simple docstring"""
    config = VideoMAEConfig()
    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    """simple docstring"""
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    """simple docstring"""
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")

    return name
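# For example (tracing the rules above):
#   rename_key("blocks.0.attn.proj.weight")
#   -> "videomae.encoder.layer.0.attention.output.dense.weight"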
def convert_state_dict(orig_state_dict, config):
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_video():
    """simple docstring"""
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    """simple docstring"""
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=True)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307])
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235])
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]])
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]])
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469])
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]])
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625])
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894])
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493])
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 400])
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421])
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]])
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266])
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]])
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 174])
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389])
    else:
        raise ValueError(f"Model name not supported. Should be one of {model_names}")

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""",
type=str,
help=(
"""URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"""
""" download link."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/Users/nielsrogge/Documents/VideoMAE/Test""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""")
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 554
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype
            )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype
                )

            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype
        )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype
        )
    def __call__(self, sample, timesteps, encoder_hidden_states, controlnet_cond, conditioning_scale=1.0, return_dict=True, train=False):
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
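# A minimal initialization sketch (illustrative, test-sized hyperparameters, not
# a pretrained configuration):
if __name__ == "__main__":
    _controlnet = FlaxControlNetModel(
        sample_size=16,
        block_out_channels=(32, 64),
        down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
        layers_per_block=1,
        attention_head_dim=8,
        cross_attention_dim=32,
    )
    _params = _controlnet.init_weights(jax.random.PRNGKey(0))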
| 124
| 0
|
'''simple docstring'''
def get_data(source_data: list) -> list:
    """simple docstring"""
    data_lists: list = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists
def calculate_each_score(data_lists: list, weights: list) -> list:
    """simple docstring"""
    score_lists: list = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)

    return score_lists
def generate_final_scores(score_lists: list) -> list:
    """simple docstring"""
    final_scores: list = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def procentual_proximity(source_data: list, weights: list) -> list:
    """simple docstring"""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)

    return source_data
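# A short usage sketch (made-up data): two attributes per row, the first
# maximized (weight 1) and the second minimized (weight 0); each row gains a
# combined score as its last element.
if __name__ == "__main__":
    vehicles = [[20, 60], [31, 27], [65, 38]]
    print(procentual_proximity(vehicles, [1, 0]))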
| 566
|
'''simple docstring'''
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        """simple docstring"""
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
    args = parser.parse_args()

    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 566
| 1
|
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()
# Function to print lower half of diamond (pyramid)
def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")
# Function to print complete diamond pattern of "*"
def pretty_print(n):
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
print(r"""| /\ | |- | |- |--| |\ /| |-""")
print(r"""|/ \| |- |_ |_ |__| | \/ | |_""")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
print("""Good Bye...""")
| 100
|
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range,
        )

        return config, pixel_values
    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case )
_UpperCAmelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case )
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase = self._prepare_for_class(snake_case , snake_case )
_UpperCAmelCase = model_class(snake_case )
@jax.jit
def model_jitted(snake_case , **snake_case ):
return model(pixel_values=snake_case , **snake_case )
with self.subTest('JIT Enabled' ):
_UpperCAmelCase = model_jitted(**snake_case ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_UpperCAmelCase = model_jitted(**snake_case ).to_tuple()
self.assertEqual(len(snake_case ) , len(snake_case ) )
for jitted_output, output in zip(snake_case , snake_case ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowerCamelCase_ ( self ) -> Optional[int]:
for model_class_name in self.all_model_classes:
_UpperCAmelCase = model_class_name.from_pretrained('google/vit-base-patch16-224' )
_UpperCAmelCase = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(snake_case )
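
A minimal standalone sketch of what test_model_from_pretrained exercises, assuming transformers with Flax support is installed and the google/vit-base-patch16-224 Flax weights can be downloaded:

import numpy as np
from transformers import FlaxViTModel

model = FlaxViTModel.from_pretrained("google/vit-base-patch16-224")
pixel_values = np.ones((1, 3, 224, 224), dtype=np.float32)  # one dummy NCHW image
outputs = model(pixel_values=pixel_values)
# 224 / 16 = 14 patches per side -> 14 * 14 + 1 ([CLS]) = 197 positions
print(outputs.last_hidden_state.shape)  # (1, 197, 768)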
| 573
| 0
|
import tempfile

import torch

from diffusers import PNDMScheduler

from .test_schedulers import SchedulerCommonTest


class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
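
For orientation, a hedged sketch of the PRK-then-PLMS flow that full_loop drives above, using only the public diffusers API; the zero tensor is a stand-in for a real UNet's noise prediction:

import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:  # Runge-Kutta warm-up steps followed by linear multistep steps
    residual = torch.zeros_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(residual, t, sample).prev_sample
print(sample.shape)  # torch.Size([1, 3, 8, 8])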
| 700
|
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose rows of raw data into per-column lists of floats."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max normalise every column; weight 0 means lower is better, weight 1 higher."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)

    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Add up the per-column scores into a single score per row."""
    final_scores: list[float] = [0 for _ in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Weighted procentual proximity: append a combined score to every row."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
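
A short usage sketch for the helpers above: the first column (price) uses weight 0 (lower is better), the second (rating) uses weight 1 (higher is better), and each row gains its combined score:

rows = [[20.0, 60.0], [10.0, 90.0], [15.0, 75.0]]
for row in procentual_proximity(rows, [0, 1]):
    print(row)
# [20.0, 60.0, 0.0]  worst price, worst rating
# [10.0, 90.0, 2.0]  best price, best rating
# [15.0, 75.0, 1.0]  midpoint on both axes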
| 7
| 0
|
from __future__ import annotations

from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    """Incremental sieve of Eratosthenes: lazily yield primes forever."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # composite: reschedule this prime factor at its next multiple
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # genuinely prime: mark its square and yield it
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """Return the least odd n for which 2 * p_n * n first exceeds limit."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
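
A quick sanity check of the incremental sieve (illustration only, not part of the solution):

gen = sieve()
print([next(gen) for _ in range(10)])  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]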
| 596
|
from __future__ import annotations

import bisect


def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Leftmost insertion point for item that keeps sorted_collection sorted."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Rightmost insertion point for item that keeps sorted_collection sorted."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    """Iterative binary search; returns the index of item or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    """Same search, delegating to the standard library's bisect module."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    """Recursive binary search over sorted_collection[left:right + 1]."""
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
| 598
| 0
|
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 160
|
def gcd(a: int, b: int) -> int:
    """Greatest common divisor via the Euclidean algorithm."""
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    """Modular inverse of a mod m via the extended Euclidean algorithm."""
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
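
Sanity check: the inverse of 7 modulo 26 (the classic affine-cipher modulus) is 15, since 7 * 15 = 105 = 4 * 26 + 1:

inv = find_mod_inverse(7, 26)
print(inv, (7 * inv) % 26)  # 15 1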
| 160
| 1
|
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
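
A hedged note on invocation: only --dump_path is required, and the script's on-disk filename is not shown in this dump, so the name below is a placeholder.

# Hypothetical invocation (filename is illustrative):
#   python convert_unclip_txt2img_to_image_variation.py --dump_path ./karlo-image-variation
# Omitting --txt2img_unclip falls back to kakaobrain/karlo-v1-alpha.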
| 45
|
import os

import pytest

from attr import dataclass


os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region


@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self) -> list:
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transformers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
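
A hedged sketch of how a test class would consume this class-scoped fixture; the class and test names below are illustrative, not taken from the source:

@pytest.mark.usefixtures("sm_env")
class TestSingleNodeTraining:
    framework = "pytorch"

    def test_environment(self):
        # the fixture attached a SageMakerTestEnvironment instance as self.env
        assert self.env.hyperparameters["task_name"] == "mnli"
        assert self.env.image_uri.endswith("ubuntu18.04")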
| 400
| 0
|
def get_set_bits_count(number: int) -> int:
    """
    Count set bits with Brian Kernighan's algorithm.

    >>> get_set_bits_count(25)
    3
    >>> get_set_bits_count(37)
    3
    >>> get_set_bits_count(0)
    0
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
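
A worked illustration of the number &= number - 1 step: each iteration clears the lowest set bit.

# 0b10110 -> 0b10100 -> 0b10000 -> 0b00000: three iterations, three set bits
print(get_set_bits_count(0b10110))  # 3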
| 547
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
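
A brief sketch of instantiating the default configuration; outside the library source tree one would import it from the top-level package, assuming a transformers release that ships MGP-STR:

from transformers import MgpstrConfig

config = MgpstrConfig()
print(config.model_type, config.image_size, config.max_token_length)  # mgp-str [32, 128] 27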
| 547
| 1
|